diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendbuckets.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendbuckets.compute.cnrm.cloud.google.com.yaml index d857aa62c4..51ac66c6c0 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendbuckets.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendbuckets.compute.cnrm.cloud.google.com.yaml @@ -89,6 +89,19 @@ spec: cdnPolicy: description: Cloud CDN configuration for this Backend Bucket. properties: + bypassCacheOnRequestHeaders: + description: Bypass the cache when the specified request headers + are matched - e.g. Pragma or Authorization headers. Up to 5 + headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode + settings. + items: + properties: + headerName: + description: The header field name to match on when bypassing + cache. Values are case-insensitive. + type: string + type: object + type: array cacheKeyPolicy: description: The CacheKeyPolicy for this CdnPolicy. properties: @@ -149,6 +162,10 @@ spec: type: integer type: object type: array + requestCoalescing: + description: If true then Cloud CDN will combine multiple concurrent + cache fill requests into a small number of requests to the origin. 
+ type: boolean serveWhileStale: description: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendservices.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendservices.compute.cnrm.cloud.google.com.yaml index 6ead33e872..50b54fa574 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendservices.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computebackendservices.compute.cnrm.cloud.google.com.yaml @@ -77,7 +77,10 @@ spec: For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) - and CONNECTION (for TCP/SSL). Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]. + and CONNECTION (for TCP/SSL). + + See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) + for an explanation of load balancing modes. Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]. 
type: string capacityScaler: description: |- diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancegroupmanagers.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancegroupmanagers.compute.cnrm.cloud.google.com.yaml index b9e21da5a8..e745f8540c 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancegroupmanagers.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancegroupmanagers.compute.cnrm.cloud.google.com.yaml @@ -242,7 +242,7 @@ spec: properties: external: description: |- - The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account: {projectNumber}@cloudservices.gserviceaccount.com is used. + The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used. Allowed value: The `email` field of an `IAMServiceAccount` resource. 
type: string diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstances.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstances.compute.cnrm.cloud.google.com.yaml index 69e87707eb..7d5a3374aa 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstances.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstances.compute.cnrm.cloud.google.com.yaml @@ -759,6 +759,10 @@ spec: description: Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user). type: boolean + instanceTerminationAction: + description: Specifies the action GCE should take when SPOT VM + is preempted. + type: string minNodeCpus: type: integer nodeAffinities: diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancetemplates.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancetemplates.compute.cnrm.cloud.google.com.yaml index f53a546cc9..fb5729f794 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancetemplates.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeinstancetemplates.compute.cnrm.cloud.google.com.yaml @@ -580,6 +580,10 @@ spec: be automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true. type: boolean + instanceTerminationAction: + description: Immutable. Specifies the action GCE should take when + SPOT VM is preempted. + type: string minNodeCpus: description: Minimum number of cpus for the instance. 
type: integer diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computenetworkendpointgroups.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computenetworkendpointgroups.compute.cnrm.cloud.google.com.yaml index 72d21a4f35..2bdcc7a6ed 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computenetworkendpointgroups.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computenetworkendpointgroups.compute.cnrm.cloud.google.com.yaml @@ -81,7 +81,9 @@ spec: Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or - CONNECTION balancing modes. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]. + CONNECTION balancing modes. + + Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]. type: string networkRef: description: |- diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeregionnetworkendpointgroups.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeregionnetworkendpointgroups.compute.cnrm.cloud.google.com.yaml index 5643d3dd26..7d69be2d78 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeregionnetworkendpointgroups.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computeregionnetworkendpointgroups.compute.cnrm.cloud.google.com.yaml @@ -174,6 +174,37 @@ spec: endpoint group. 
Defaults to SERVERLESS Default value: "SERVERLESS" Possible values: ["SERVERLESS", "PRIVATE_SERVICE_CONNECT"].' type: string + networkRef: + description: |- + Immutable. This field is only used for PSC. + The URL of the network to which all network endpoints in the NEG belong. Uses + "default" project network if unspecified. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `selfLink` field of a `ComputeNetwork` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object pscTargetService: description: |- Immutable. The target service url used to set up private service connection to @@ -188,6 +219,36 @@ spec: creation and acquisition. When unset, the value of `metadata.name` is used as the default. type: string + subnetworkRef: + description: |- + Immutable. This field is only used for PSC. + Optional URL of the subnetwork to which all network endpoints in the NEG belong. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `selfLink` field of a `ComputeSubnetwork` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object required: - region type: object diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml index 602e8906e5..0166f19197 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_computetargethttpsproxies.compute.cnrm.cloud.google.com.yaml @@ -58,6 +58,40 @@ spec: type: object spec: properties: + certificateMapRef: + description: |- + Only the `external` field is supported to configure the reference. + + A reference to the CertificateMap resource uri that identifies a + certificate map associated with the given target proxy. This field + can only be set for global target proxies. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: string of the format `//certificatemanager.googleapis.com/projects/{{project}}/locations/{{location}}/certificateMaps/{{value}}`, + where {{value}} is the `name` field of a `CertificateManagerCertificateMap` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object description: description: Immutable. An optional description of this resource. 
type: string @@ -181,7 +215,6 @@ spec: type: object required: - location - - sslCertificates - urlMapRef type: object status: diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_containerclusters.container.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_containerclusters.container.cnrm.cloud.google.com.yaml index 5a303e715a..f15426a8dc 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_containerclusters.container.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_containerclusters.container.cnrm.cloud.google.com.yaml @@ -92,7 +92,7 @@ spec: gcePersistentDiskCsiDriverConfig: description: Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. - Defaults to disabled; set enabled = true to enable. + Defaults to enabled; set disabled = true to disable. properties: enabled: type: boolean @@ -182,17 +182,28 @@ spec: type: object type: object authenticatorGroupsConfig: - description: Immutable. Configuration for the Google Groups for GKE - feature. + description: Configuration for the Google Groups for GKE feature. properties: securityGroup: - description: Immutable. The name of the RBAC security group for - use with Google security groups in Kubernetes RBAC. Group name - must be in format gke-security-groups@yourdomain.com. + description: The name of the RBAC security group for use with + Google security groups in Kubernetes RBAC. Group name must be + in format gke-security-groups@yourdomain.com. type: string required: - securityGroup type: object + binaryAuthorization: + description: Configuration options for the Binary Authorization feature. + properties: + enabled: + description: DEPRECATED. Deprecated in favor of evaluation_mode. + Enable Binary Authorization for this cluster. 
+ type: boolean + evaluationMode: + description: Mode of operation for Binary Authorization policy + evaluation. + type: string + type: object clusterAutoscaling: description: Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to automatically adjust the size of the cluster @@ -203,6 +214,36 @@ spec: autoProvisioningDefaults: description: Contains defaults for a node pool created by NAP. properties: + bootDiskKMSKeyRef: + description: |- + Immutable. The Customer Managed Encryption Key used to encrypt the + boot disk attached to each node in the node pool. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `selfLink` field of a + `KMSCryptoKey` resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object imageType: description: The default image type used by NAP once a new node pool is being created. @@ -369,8 +410,9 @@ spec: description: Immutable. Enable Autopilot for this cluster. type: boolean enableBinaryAuthorization: - description: Enable Binary Authorization for this cluster. If enabled, - all container images will be validated by Google Binary Authorization. + description: DEPRECATED. Deprecated in favor of binary_authorization. + Enable Binary Authorization for this cluster. If enabled, all container + images will be validated by Google Binary Authorization. type: boolean enableIntranodeVisibility: description: Whether Intra-node visibility is enabled for this cluster. 
@@ -633,6 +675,18 @@ spec: type: object type: array type: object + meshCertificates: + description: If set, and enable_certificates=true, the GKE Workload + Identity Certificates controller and node agent will be deployed + in the cluster. + properties: + enableCertificates: + description: When enabled the GKE Workload Identity Certificates + controller and node agent will be deployed in the cluster. + type: boolean + required: + - enableCertificates + type: object minMasterVersion: description: The minimum version of the master. GKE will auto-update the master to new versions, so this does not guarantee the current @@ -646,7 +700,8 @@ spec: properties: enableComponents: description: GKE components exposing metrics. Valid values include - SYSTEM_COMPONENTS and WORKLOADS. + SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, + and WORKLOADS. items: type: string type: array @@ -1065,18 +1120,18 @@ spec: nodes. properties: enablePrivateEndpoint: - description: Immutable. Enables the private cluster feature, creating - a private endpoint on the cluster. In a private cluster, nodes - only have RFC 1918 private addresses and communicate with the - master's private endpoint via private networking. - type: boolean - enablePrivateNodes: description: Immutable. When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true. type: boolean + enablePrivateNodes: + description: Immutable. Enables the private cluster feature, creating + a private endpoint on the cluster. In a private cluster, nodes + only have RFC 1918 private addresses and communicate with the + master's private endpoint via private networking. + type: boolean masterGlobalAccessConfig: description: Controls cluster master global access settings. 
properties: diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_dataprocworkflowtemplates.dataproc.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_dataprocworkflowtemplates.dataproc.cnrm.cloud.google.com.yaml index 779c5af8a7..b0b47d3d49 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_dataprocworkflowtemplates.dataproc.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_dataprocworkflowtemplates.dataproc.cnrm.cloud.google.com.yaml @@ -177,7 +177,7 @@ spec: Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to - specify a HiveJob: "hiveJob": { "queryList": { "queries": + specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' items: type: string @@ -254,7 +254,7 @@ spec: Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to - specify a HiveJob: "hiveJob": { "queryList": { "queries": + specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' items: type: string @@ -329,7 +329,7 @@ spec: Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to - specify a HiveJob: "hiveJob": { "queryList": { "queries": + specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' items: type: string @@ -585,7 +585,7 @@ spec: Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to - specify a HiveJob: "hiveJob": { "queryList": { "queries": + specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' items: type: string diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_gkehubmemberships.gkehub.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_gkehubmemberships.gkehub.cnrm.cloud.google.com.yaml index 549c7edcce..6277f8386d 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_gkehubmemberships.gkehub.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_gkehubmemberships.gkehub.cnrm.cloud.google.com.yaml @@ -183,12 +183,12 @@ spec: `issuer` in the workload identity pool. type: string workloadIdentityPool: - description: 'Output only. The name of the workload identity pool + description: Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships - that belong to that Hub. For a Hub hosted in: {PROJECT_ID}, - the workload pool format is `{PROJECT_ID}.hub.id.goog`, although - this is subject to change in newer versions of this API.' + that belong to that Hub. For a Hub hosted in {PROJECT_ID}, the + workload pool format is `{PROJECT_ID}.hub.id.goog`, although + this is subject to change in newer versions of this API. 
type: string type: object conditions: diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml index a03257f255..17b626fac4 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml @@ -115,9 +115,9 @@ spec: is used as the default. type: string responseType: - description: 'The multiple response type to request for in the OAuth + description: The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits - (e.g.: {id\_token, token}).' + (e.g. {id\_token, token}). properties: code: description: If true, authorization code is returned from IdP's diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformtenantoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformtenantoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml index ef87840b57..c66ffdbe58 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformtenantoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_identityplatformtenantoauthidpconfigs.identityplatform.cnrm.cloud.google.com.yaml @@ -115,9 +115,9 @@ spec: is used as the default. 
type: string responseType: - description: 'The multiple response type to request for in the OAuth + description: The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits - (e.g.: {id\_token, token}).' + (e.g. {id\_token, token}). properties: code: description: If true, authorization code is returned from IdP's diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_kmscryptokeys.kms.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_kmscryptokeys.kms.cnrm.cloud.google.com.yaml index 4e298a0f01..6e742bd0a6 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_kmscryptokeys.kms.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_kmscryptokeys.kms.cnrm.cloud.google.com.yaml @@ -99,7 +99,7 @@ spec: description: |- Immutable. The immutable purpose of this CryptoKey. See the [purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) - for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"]. + for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC"]. type: string resourceID: description: Immutable. Optional. The name of the resource. Used for @@ -129,7 +129,7 @@ spec: protectionLevel: description: Immutable. The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", - "HSM", "EXTERNAL". Defaults to "SOFTWARE". + "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE". 
type: string required: - algorithm diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_logginglogmetrics.logging.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_logginglogmetrics.logging.cnrm.cloud.google.com.yaml index 3a6d991661..ed9f2455b6 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_logginglogmetrics.logging.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_logginglogmetrics.logging.cnrm.cloud.google.com.yaml @@ -247,7 +247,7 @@ spec: rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. - The grammar for a unit is as follows: Expression = Component: + The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringalertpolicies.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringalertpolicies.monitoring.cnrm.cloud.google.com.yaml index 670de77449..a37b229557 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringalertpolicies.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringalertpolicies.monitoring.cnrm.cloud.google.com.yaml @@ -289,6 +289,12 @@ spec: that unhealthy states are detected and alerted on quickly. type: string + evaluationMissingData: + description: |- + A condition control that determines how + metric-threshold conditions are evaluated when + data stops arriving. 
Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]. + type: string query: description: Monitoring Query Language query that outputs a boolean stream. @@ -588,6 +594,12 @@ spec: that unhealthy states are detected and alerted on quickly. type: string + evaluationMissingData: + description: |- + A condition control that determines how + metric-threshold conditions are evaluated when + data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]. + type: string filter: description: |- A filter that identifies which time series diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml index cb35ed2a8b..fea1438866 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringdashboards.monitoring.cnrm.cloud.google.com.yaml @@ -179,7 +179,7 @@ spec: then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: @@ -1924,7 +1924,7 @@ spec: above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard - with the following four thresholds: { value: 90, category: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: @@ -3469,7 +3469,7 @@ spec: then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: @@ -5187,7 +5187,7 @@ spec: then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringmetricdescriptors.monitoring.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringmetricdescriptors.monitoring.cnrm.cloud.google.com.yaml index 078fe336bd..a45631ef3e 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringmetricdescriptors.monitoring.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_monitoringmetricdescriptors.monitoring.cnrm.cloud.google.com.yaml @@ -198,17 +198,16 @@ spec: `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). 
For examples, `GBy.d` or `k{watt}.h`. The - grammar for a unit is as follows: Expression = Component: { "." - Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | - "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME - "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. - If the annotation is used alone, then the unit is equivalent to - `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - * `NAME` is a sequence of non-blank printable ASCII characters not - containing `{` or `}`. * `1` represents a unitary [dimensionless - unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, - such as in `1/s`. It is typically used when none of the basic units - are appropriate. For example, "new users per day" can be represented + grammar for a unit is as follows: Expression = Component { "." Component + } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation + ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` + is just a comment if it follows a `UNIT`. If the annotation is used + alone, then the unit is equivalent to `1`. For examples, `{request}/s + == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of + non-blank printable ASCII characters not containing `{` or `}`. + * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) + of 1, such as in `1/s`. It is typically used when none of the basic + units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). 
Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_osconfigguestpolicies.osconfig.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_osconfigguestpolicies.osconfig.cnrm.cloud.google.com.yaml index c50ed18865..7ca56f7cdf 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_osconfigguestpolicies.osconfig.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_osconfigguestpolicies.osconfig.cnrm.cloud.google.com.yaml @@ -376,9 +376,9 @@ spec: of the steps. type: string uri: - description: 'URI from which to fetch the object. - It should contain both the protocol and path following - the format: {protocol}://{location}.' + description: URI from which to fetch the object. It + should contain both the protocol and path following + the format {protocol}://{location}. type: string type: object type: object diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_pubsubsubscriptions.pubsub.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_pubsubsubscriptions.pubsub.cnrm.cloud.google.com.yaml index 848386e067..7f3368a032 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_pubsubsubscriptions.pubsub.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_pubsubsubscriptions.pubsub.cnrm.cloud.google.com.yaml @@ -79,6 +79,58 @@ spec: If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message. type: integer + bigqueryConfig: + description: |- + If delivery to BigQuery is used with this subscription, this field is used to configure it. + Either pushConfig or bigQueryConfig can be set, but not both. 
+ If both are empty, then the subscriber will pull and ack messages using API methods. + properties: + dropUnknownFields: + description: |- + When true and useTopicSchema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. + Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. + type: boolean + tableRef: + description: The name of the table to which to write data. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: string of the format `{{project}}.{{dataset_id}}.{{value}}`, + where {{value}} is the `name` field of a `BigQueryTable` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + useTopicSchema: + description: When true, use the topic's schema as the columns + to write to in BigQuery, if it exists. + type: boolean + writeMetadata: + description: |- + When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. + The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. 
+ type: boolean + required: + - tableRef + type: object deadLetterPolicy: description: |- A policy that specifies the conditions for dead lettering messages in diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_redisinstances.redis.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_redisinstances.redis.cnrm.cloud.google.com.yaml index 7cf4e5b7b3..8ef00f4d46 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_redisinstances.redis.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_redisinstances.redis.cnrm.cloud.google.com.yaml @@ -110,6 +110,37 @@ spec: Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"].' type: string + customerManagedKeyRef: + description: |- + Immutable. Optional. The KMS key reference that you want to use to + encrypt the data at rest for this Redis instance. If this is + provided, CMEK is enabled. + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `selfLink` field of a `KMSCryptoKey` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object displayName: description: An arbitrary and optional user-provided name for the instance. 
diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_runservices.run.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_runservices.run.cnrm.cloud.google.com.yaml index 8c731b5410..5d26213a16 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_runservices.run.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_runservices.run.cnrm.cloud.google.com.yaml @@ -232,7 +232,7 @@ spec: properties: external: description: |- - Required. The name of the secret in Cloud Secret Manager. Format: {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project. + Required. The name of the secret in Cloud Secret Manager. Format {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project. Allowed value: The Google Cloud resource name of a `SecretManagerSecret` resource (format: `projects/{{project}}/secrets/{{name}}`). type: string @@ -575,7 +575,7 @@ spec: properties: external: description: |- - Required. The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. + Required. The name of the secret in Cloud Secret Manager. Format {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. Allowed value: The Google Cloud resource name of a `SecretManagerSecret` resource (format: `projects/{{project}}/secrets/{{name}}`). 
type: string diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_spannerdatabases.spanner.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_spannerdatabases.spanner.cnrm.cloud.google.com.yaml index 35a0f22c21..94c54b5609 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_spannerdatabases.spanner.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_spannerdatabases.spanner.cnrm.cloud.google.com.yaml @@ -59,6 +59,9 @@ spec: spec: properties: databaseDialect: + description: |- + Immutable. The dialect of the Cloud Spanner Database. + If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]. type: string ddl: description: |- @@ -138,6 +141,14 @@ spec: creation and acquisition. When unset, the value of `metadata.name` is used as the default. type: string + versionRetentionPeriod: + description: |- + The retention period for the database. The retention period must be between 1 hour + and 7 days, and can be specified in days, hours, minutes, or seconds. For example, + the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. + If this property is used, you must avoid adding new DDL statements to 'ddl' that + update the database's version_retention_period. 
+ type: string required: - instanceRef type: object diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_sqlinstances.sql.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_sqlinstances.sql.cnrm.cloud.google.com.yaml index cc54deea2a..33683e0129 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_sqlinstances.sql.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_sqlinstances.sql.cnrm.cloud.google.com.yaml @@ -219,7 +219,7 @@ spec: type: string rootPassword: description: Immutable. Initial root password. Required for MS SQL - Server, ignored by MySQL and PostgreSQL. + Server. oneOf: - not: required: @@ -326,7 +326,7 @@ spec: type: integer type: object collation: - description: The name of server instance collation. + description: Immutable. The name of server instance collation. type: string crashSafeReplication: description: |- @@ -447,6 +447,9 @@ spec: description: A Google App Engine application whose zone to remain in. Must be in the same region as this instance. type: string + secondaryZone: + description: The preferred Compute Engine zone for the secondary/failover. + type: string zone: description: The preferred compute engine zone. type: string @@ -466,6 +469,30 @@ spec: description: Receive updates earlier (canary) or later (stable). type: string type: object + passwordValidationPolicy: + properties: + complexity: + description: Password complexity. + type: string + disallowUsernameSubstring: + description: Disallow username as a part of the password. + type: boolean + enablePasswordPolicy: + description: Whether the password policy is enabled or not. + type: boolean + minLength: + description: Minimum number of characters allowed. + type: integer + passwordChangeInterval: + description: Minimum interval after which the password can + be changed. This flag is only supported for PostgresSQL. 
+ type: string + reuseInterval: + description: Number of previous passwords that cannot be reused. + type: integer + required: + - enablePasswordPolicy + type: object pricingPlan: description: Pricing plan for this instance, can only be PER_USE. type: string @@ -474,6 +501,49 @@ spec: DEPRECATED. This property is only applicable to First Generation instances, and First Generation instances are now deprecated. see https://cloud.google.com/sql/docs/mysql/deprecation-notice for information on how to upgrade to Second Generation instances. Specifying this field has no-ops; it's recommended to remove this field from your configuration. type: string + sqlServerAuditConfig: + properties: + bucketRef: + description: The name of the destination bucket (e.g., gs://mybucket). + oneOf: + - not: + required: + - external + required: + - name + - not: + anyOf: + - required: + - name + - required: + - namespace + required: + - external + properties: + external: + description: 'Allowed value: The `url` field of a `StorageBucket` + resource.' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + type: object + retentionInterval: + description: 'How long to keep generated audit files. A duration + in seconds with up to nine fractional digits, terminated + by ''s''. Example: "3.5s"..' + type: string + uploadInterval: + description: 'How often to upload generated audit files. A + duration in seconds with up to nine fractional digits, terminated + by ''s''. Example: "3.5s".' + type: string + required: + - bucketRef + type: object tier: description: The machine type to use. See tiers for more details and supported versions. 
Postgres supports only shared-core machine diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml index e59bc78c10..62cd9a7117 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_storagebuckets.storage.cnrm.cloud.google.com.yaml @@ -175,6 +175,12 @@ spec: timestamp of an object. This\n\t\t\t\t\t\t\t\t\t\tcondition is relevant only for versioned objects." type: integer + matchesPrefix: + description: One or more matching name prefixes to satisfy + this condition. + items: + type: string + type: array matchesStorageClass: description: 'Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, @@ -182,6 +188,12 @@ spec: items: type: string type: array + matchesSuffix: + description: One or more matching name suffixes to satisfy + this condition. + items: + type: string + type: array noncurrentTimeBefore: description: Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. 
diff --git a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com.yaml b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com.yaml index 19eb7c9ff8..1a6631775f 100644 --- a/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com.yaml +++ b/config/crds/resources/apiextensions.k8s.io_v1_customresourcedefinition_vpcaccessconnectors.vpcaccess.cnrm.cloud.google.com.yaml @@ -177,7 +177,7 @@ spec: properties: external: description: |- - Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be: {subnetName} + Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be {subnetName} Allowed value: The Google Cloud resource name of a `ComputeSubnetwork` resource (format: `projects/{{project}}/regions/{{region}}/subnetworks/{{name}}`). 
type: string diff --git a/config/servicemappings/compute.yaml b/config/servicemappings/compute.yaml index 88fc792ec4..3be8f5957e 100644 --- a/config/servicemappings/compute.yaml +++ b/config/servicemappings/compute.yaml @@ -83,6 +83,13 @@ spec: containers: - type: project tfField: project + iamConfig: + policyName: google_compute_backend_bucket_iam_policy + policyMemberName: google_compute_backend_bucket_iam_member + referenceField: + name: name + type: name + supportsConditions: false - name: google_compute_backend_service kind: ComputeBackendService metadataMapping: @@ -163,7 +170,7 @@ spec: gvk: kind: NetworkSecurityClientTLSPolicy version: v1beta1 - group: networksecurity.cnrm.cloud.google.com/v1beta1 + group: networksecurity.cnrm.cloud.google.com - tfField: iap.oauth2_client_id description: |- Only `external` field is supported to configure the reference. @@ -1398,6 +1405,27 @@ spec: version: v1beta1 group: cloudfunctions.cnrm.cloud.google.com targetField: name + - key: networkRef + tfField: network + description: |- + Immutable. This field is only used for PSC. + The URL of the network to which all network endpoints in the NEG belong. Uses + "default" project network if unspecified. + gvk: + kind: ComputeNetwork + version: v1beta1 + group: compute.cnrm.cloud.google.com + targetField: self_link + - key: subnetworkRef + tfField: subnetwork + description: |- + Immutable. This field is only used for PSC. + Optional URL of the subnetwork to which all network endpoints in the NEG belong. 
+ gvk: + kind: ComputeSubnetwork + version: v1beta1 + group: compute.cnrm.cloud.google.com + targetField: self_link ignoredFields: # As of 5/19/22 the only allowed value for serverlessDeployment.platform is `apigateway.googleapis.com` # This field is ignored because APIGateway is not a supported resource at this time @@ -1950,6 +1978,13 @@ spec: containers: - type: project tfField: project + iamConfig: + policyName: google_compute_snapshot_iam_policy + policyMemberName: google_compute_snapshot_iam_member + referenceField: + name: name + type: name + supportsConditions: false - name: google_compute_ssl_certificate kind: ComputeSSLCertificate metadataMapping: @@ -2097,6 +2132,20 @@ spec: version: v1beta1 group: compute.cnrm.cloud.google.com targetField: self_link + - key: certificateMapRef + tfField: certificate_map + # TODO (b/203667132): Fix the reference config after CertificateManagerCertificateMap is supported. + description: |- + Only the `external` field is supported to configure the reference. + + A reference to the CertificateMap resource uri that identifies a + certificate map associated with the given target proxy. This field + can only be set for global target proxies. + gvk: + kind: CertificateManagerCertificateMap + version: v1beta1 + group: certificatemanager.cnrm.cloud.google.com + valueTemplate: "//certificatemanager.googleapis.com/projects/{{project}}/locations/{{location}}/certificateMaps/{{value}}" containers: - type: project tfField: project diff --git a/config/servicemappings/container.yaml b/config/servicemappings/container.yaml index e715aa3d27..2ddb4ae231 100644 --- a/config/servicemappings/container.yaml +++ b/config/servicemappings/container.yaml @@ -96,6 +96,16 @@ spec: kind: ComputeNodeGroup version: v1beta1 group: compute.cnrm.cloud.google.com + - tfField: cluster_autoscaling.auto_provisioning_defaults.boot_disk_kms_key + key: bootDiskKMSKeyRef + description: |- + Immutable. 
The Customer Managed Encryption Key used to encrypt the + boot disk attached to each node in the node pool. + gvk: + kind: KMSCryptoKey + version: v1beta1 + group: kms.cnrm.cloud.google.com + targetField: self_link - name: google_container_node_pool kind: ContainerNodePool idTemplate: "{{project}}/{{location}}/{{cluster}}/{{name}}" diff --git a/config/servicemappings/pubsub.yaml b/config/servicemappings/pubsub.yaml index 08b5a8c077..45706b73ff 100644 --- a/config/servicemappings/pubsub.yaml +++ b/config/servicemappings/pubsub.yaml @@ -76,6 +76,15 @@ spec: version: v1beta1 group: pubsub.cnrm.cloud.google.com valueTemplate: "projects/{{project}}/topics/{{value}}" + - tfField: bigquery_config.table + key: tableRef + description: |- + The name of the table to which to write data. + gvk: + kind: BigQueryTable + version: v1beta1 + group: bigquery.cnrm.cloud.google.com + valueTemplate: "{{project}}.{{dataset_id}}.{{value}}" containers: - type: project tfField: project diff --git a/config/servicemappings/redis.yaml b/config/servicemappings/redis.yaml index 0105d894ca..32aace29ec 100644 --- a/config/servicemappings/redis.yaml +++ b/config/servicemappings/redis.yaml @@ -43,6 +43,17 @@ spec: version: v1beta1 group: compute.cnrm.cloud.google.com targetField: self_link + - tfField: customer_managed_key + key: customerManagedKeyRef + description: |- + Immutable. Optional. The KMS key reference that you want to use to + encrypt the data at rest for this Redis instance. If this is + provided, CMEK is enabled. 
+ gvk: + kind: KMSCryptoKey + version: v1beta1 + group: kms.cnrm.cloud.google.com + targetField: self_link containers: - type: project tfField: project diff --git a/config/servicemappings/sql.yaml b/config/servicemappings/sql.yaml index 90ec05570f..a4a2237a04 100644 --- a/config/servicemappings/sql.yaml +++ b/config/servicemappings/sql.yaml @@ -74,6 +74,14 @@ spec: version: v1beta1 group: kms.cnrm.cloud.google.com targetField: self_link + - tfField: settings.sql_server_audit_config.bucket + key: bucketRef + description: The name of the destination bucket (e.g., gs://mybucket). + gvk: + kind: StorageBucket + version: v1beta1 + group: storage.cnrm.cloud.google.com + targetField: url ignoredFields: - settings.version - deletion_protection diff --git a/config/tests/servicemapping/servicemapping_test.go b/config/tests/servicemapping/servicemapping_test.go index e9cc729378..7c24399db6 100644 --- a/config/tests/servicemapping/servicemapping_test.go +++ b/config/tests/servicemapping/servicemapping_test.go @@ -396,15 +396,21 @@ func validateTypeConfigGVK(t *testing.T, rc v1alpha1.ResourceConfig, ref v1alpha // end code block // This list of ignored GVK is to allow certain resources to have - // external-only resource references (DCL-based resources). - ignoredGvkList := []k8sschema.GroupVersionKind{ + // external-only resource references (DCL-based resources or unsupported + // resources). 
+ ignoredGVKList := []k8sschema.GroupVersionKind{ { - Group: "networksecurity.cnrm.cloud.google.com/v1beta1", + Group: "networksecurity.cnrm.cloud.google.com", Version: "v1beta1", Kind: "NetworkSecurityClientTLSPolicy", }, + { + Group: "certificatemanager.cnrm.cloud.google.com", + Version: "v1beta1", + Kind: "CertificateManagerCertificateMap", + }, } - for _, g := range ignoredGvkList { + for _, g := range ignoredGVKList { if gvk == g { return } diff --git a/go.mod b/go.mod index c95e6de8ab..a4235df00e 100644 --- a/go.mod +++ b/go.mod @@ -5,12 +5,12 @@ go 1.17 require ( cloud.google.com/go/profiler v0.1.0 contrib.go.opencensus.io/exporter/prometheus v0.1.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.15.2 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0 github.com/Masterminds/sprig v2.22.0+incompatible github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 github.com/blang/semver v3.5.1+incompatible github.com/cenkalti/backoff v2.2.1+incompatible - github.com/fatih/color v1.12.0 + github.com/fatih/color v1.13.0 github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.2.0 github.com/go-logr/zapr v1.2.0 @@ -20,8 +20,8 @@ require ( github.com/gosimple/slug v1.9.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/hcl/v2 v2.12.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0 + github.com/hashicorp/hcl/v2 v2.13.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.18.0 github.com/hashicorp/terraform-provider-google-beta v3.73.0+incompatible github.com/nasa9084/go-openapi v0.0.0-20200604141640-2875b7376353 github.com/olekukonko/tablewriter v0.0.5 @@ -118,18 +118,18 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.2.0 // indirect + github.com/hashicorp/go-hclog v1.2.1 // indirect 
github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.3 // indirect + github.com/hashicorp/go-plugin v1.4.4 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.4.0 // indirect - github.com/hashicorp/hc-install v0.3.2 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hc-install v0.4.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.16.1 // indirect - github.com/hashicorp/terraform-json v0.13.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-log v0.4.0 // indirect - github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 // indirect + github.com/hashicorp/terraform-exec v0.17.2 // indirect + github.com/hashicorp/terraform-json v0.14.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.10.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.4.1 // indirect + github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/huandu/xstrings v1.3.2 // indirect @@ -143,8 +143,8 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.1.8 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -169,7 +169,7 @@ require ( 
github.com/russross/blackfriday v1.5.2 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/stretchr/testify v1.7.0 // indirect + github.com/stretchr/testify v1.7.2 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect @@ -178,7 +178,7 @@ require ( go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect + golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/net v0.0.0-20220526153639-5463443f8c37 // indirect golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect @@ -189,11 +189,11 @@ require ( gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58 // indirect - google.golang.org/grpc v1.46.2 // indirect + google.golang.org/grpc v1.47.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cli-runtime v0.24.0 // indirect k8s.io/component-base v0.24.2 // indirect k8s.io/klog/v2 v2.60.1 // indirect diff --git a/go.sum b/go.sum index 71ba08337e..a78760d593 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= bitbucket.org/creachadair/stringset 
v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -15,7 +13,6 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= @@ -61,8 +58,6 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -71,7 +66,6 @@ cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod 
h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -88,19 +82,14 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.11.0/go.mod h1:UJoDYx6t3+xCOd+dZX8+NrEB+Y/eW1pQlvxh2Gt7y5E= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.15.2 h1:FWEE9Fk1bC4oE4O1jvI7gskksmpUh/vpX9J+GPP/WhA= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.15.2/go.mod h1:i6Pmzp7aolLmJY86RaJ9wjqm/HFleMeN7Vl5uIWLwE8= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0 h1:+zbrl0sUHK+oav4Nhru21AjJLPwnmQCL01oZYzYiPac= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0/go.mod h1:i6Pmzp7aolLmJY86RaJ9wjqm/HFleMeN7Vl5uIWLwE8= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod 
h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -109,48 +98,28 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 
h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/prealloc v1.0.0/go.mod 
h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 
h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= @@ -158,22 +127,11 @@ github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkE github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.0/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -184,32 +142,25 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi 
v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -226,22 +177,16 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/cockroachdb/datadriven 
v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= @@ -249,28 +194,17 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/go-units v0.3.3/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -278,7 +212,6 @@ github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -289,14 +222,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -308,29 +237,21 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= -github.com/gammazero/deque 
v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= github.com/gammazero/deque v0.0.0-20190521012701-46e4ffb7a622 h1:lxbhOGZ9pU3Kf8P6lFluUcE82yVZn2EqEf4+mWRNPV0= github.com/gammazero/deque v0.0.0-20190521012701-46e4ffb7a622/go.mod h1:D90+MBHVc9Sk1lJAbEVgws0eYEurY4mv2TDso3Nxh3w= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/gammazero/workerpool v0.0.0-20190608213748-0ed5e40ec55e h1:fqgNEGLc7p2Rz4xlDHp9WNw/pqqR3c2cLdIC4zASBzU= github.com/gammazero/workerpool v0.0.0-20190608213748-0ed5e40ec55e/go.mod h1:avlwxCMavNtjwf7NrfnzdIGU3OZYI5D1NFQ2Rn3nHKg= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= @@ -339,21 +260,15 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= 
github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -361,7 +276,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -371,86 +285,30 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v0.4.0/go.mod 
h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference 
v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.17.0/go.mod 
h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod 
h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -495,18 +353,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -514,8 +361,6 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= 
github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -548,7 +393,6 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -563,8 +407,6 @@ github.com/google/pprof v0.0.0-20210804190019-f964ff605595/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= 
-github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -580,47 +422,25 @@ github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosimple/slug v1.9.0 h1:r5vDcYrFz9BmfIAMC829un9hq7hKM4cHUrsv36LbEqs= github.com/gosimple/slug v1.9.0/go.mod h1:AMZ+sOVe65uByN3kgEyf9WEBKBCSS+dJjMX9x4vDJbg= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod 
h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= @@ -631,16 +451,15 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0 
h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= +github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= +github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -649,76 +468,55 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.4.0 
h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= -github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hc-install v0.3.1/go.mod h1:3LCdWcCDS1gaHC9mhHCGbkYfoY6vdsKohGjugbZdZak= -github.com/hashicorp/hc-install v0.3.2 h1:oiQdJZvXmkNcRcEOOfM5n+VTsvNjWQeOjfAoO6dKSH8= -github.com/hashicorp/hc-install v0.3.2/go.mod h1:xMG6Tr8Fw1WFjlxH0A9v61cW15pFwgEGqEz0V4jisHs= +github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= +github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.12.0 h1:PsYxySWpMD4KPaoJLnsHwtK5Qptvj/4Q6s0t4sUxZf4= -github.com/hashicorp/hcl/v2 v2.12.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform-exec v0.16.1 h1:NAwZFJW2L2SaCBVZoVaH8LPImLOGbPLkSHy0IYbs2uE= -github.com/hashicorp/terraform-exec v0.16.1/go.mod h1:aj0lVshy8l+MHhFNoijNHtqTJQI3Xlowv5EOsEaGO7M= -github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY= -github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk= -github.com/hashicorp/terraform-plugin-go v0.9.0 h1:FvLY/3z4SNVatPZdoFcyrlNbCar+WyyOTv5X4Tp+WZc= -github.com/hashicorp/terraform-plugin-go v0.9.0/go.mod h1:EawBkgjBWNf7jiKnVoyDyF39OSV+u6KUX+Y73EPj3oM= -github.com/hashicorp/terraform-plugin-log v0.3.0/go.mod h1:EjueSP/HjlyFAsDqt+okpCPjkT4NDynAe32AeDC4vps= -github.com/hashicorp/terraform-plugin-log v0.4.0 h1:F3eVnm8r2EfQCe2k9blPIiF/r2TT01SHijXnS7bujvc= -github.com/hashicorp/terraform-plugin-log v0.4.0/go.mod h1:9KclxdunFownr4pIm1jdmwKRmE4d6HVG2c9XDq47rpg= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0 h1:9fjPgCenJqnbjo95SDcbJ+YdLyEC1N35cwKWcRWhJTQ= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0/go.mod h1:hLa0sTiySU/AWEgV2GxJh0/pQIqcCmm30IPja9N9lTg= -github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 h1:1FGtlkJw87UsTMg5s8jrekrHmUPUJaMcu6ELiVhQrNw= -github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896/go.mod h1:bzBPnUIkI0RxauU8Dqo+2KrZZ28Cf48s8V6IHt3p4co= +github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go= +github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8= +github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= +github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= 
+github.com/hashicorp/terraform-plugin-go v0.10.0 h1:FIQDt/AZDSOXnN+znBnLLZA9aFk4/GwL40rwMLnvuTk= +github.com/hashicorp/terraform-plugin-go v0.10.0/go.mod h1:aphXBG8qtQH0yF1waMRlaw/3G+ZFlR/6Artnvt1QEDE= +github.com/hashicorp/terraform-plugin-log v0.4.1 h1:xpbmVhvuU3mgHzLetOmx9pkOL2rmgpu302XxddON6eo= +github.com/hashicorp/terraform-plugin-log v0.4.1/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.18.0 h1:/cdI5di5XA+N80gXzXF4YcHq36DprBskubk6Z8i26ZQ= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.18.0/go.mod h1:L3SHkD/Q8zPVgXviQmpVwy9nKwpXXZscVIpVEnQ/T50= +github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= +github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hudl/fargo v1.3.0/go.mod 
h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1/go.mod 
h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -726,7 +524,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -734,23 +531,15 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -763,82 +552,48 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod 
h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.2.0 
h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 
h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
-github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -850,8 +605,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -869,186 +622,107 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= 
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/nasa9084/go-openapi v0.0.0-20200604141640-2875b7376353 h1:B12KiVODig0ltyL7AOeO68ZKLrIWAus/SdlWvO7KcfA= github.com/nasa9084/go-openapi v0.0.0-20200604141640-2875b7376353/go.mod h1:Y+QYE2No9P7gTzq/clACcx4vZ34gemXUmfspIcRD6LY= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod 
h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= 
-github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod 
h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod 
h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common 
v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 
h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be h1:ta7tUOvsPHVHGom5hKW5VXNc2xZIkfCKP8iaqOyYtUQ= github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be/go.mod h1:MIDFMn7db1kT65GmV94GzpX9Qdi7N/pQlwb+AN8wh+Q= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod 
h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1065,9 +739,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1075,8 +746,6 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= @@ -1086,61 +755,29 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spyzhov/ajson v0.4.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= 
-github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= 
-github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate 
v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -1148,17 +785,11 @@ github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvC github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= 
-github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1167,18 +798,13 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod 
h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -1190,12 +816,6 @@ go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46O go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1220,54 +840,38 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee33 go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic 
v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1278,7 +882,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1305,28 +908,22 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod 
v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1334,8 +931,6 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1344,13 +939,11 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1404,7 +997,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1421,17 +1013,11 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1440,15 +1026,12 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1460,13 +1043,11 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1480,12 +1061,8 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1505,6 +1082,7 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1516,6 +1094,7 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1537,44 +1116,29 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1582,12 +1146,9 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1597,52 +1158,26 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1658,12 +1193,10 @@ golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3k golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -1703,27 +1236,21 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r google.golang.org/api v0.82.0 
h1:h6EGeZuzhoKSS7BUznzkW+2wHZ+4Ubd6rsVvvh3dRkw= google.golang.org/api v0.82.0/go.mod h1:Ld58BeTlL9DIYr2M2ajvoSqmGLei0BMn+kVBmkam1os= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1744,8 +1271,6 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1801,23 +1326,15 @@ google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX google.golang.org/genproto 
v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58 h1:a221mAAEAzq4Lz6ZWRkcS8ptb2mxoxYSt4N68aRyQHM= google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1839,10 +1356,10 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1866,10 +1383,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1888,20 +1403,18 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1909,45 +1422,33 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= -k8s.io/apiextensions-apiserver v0.24.0/go.mod h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM= k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= k8s.io/apimachinery v0.24.2/go.mod 
h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= -k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA= k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= -k8s.io/cli-runtime v0.21.1/go.mod h1:TI9Bvl8lQWZB2KqE91QLCp9AZE4l29zNFnj/x4IX4Fw= k8s.io/cli-runtime v0.24.0 h1:ot3Qf49T852uEyNApABO1UHHpFIckKK/NqpheZYN2gM= k8s.io/cli-runtime v0.24.0/go.mod h1:9XxoZDsEkRFUThnwqNviqzljtT/LdHtNWvcNFrAXl0A= -k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/code-generator v0.24.2 h1:EGeRWzJrpwi6T6CvoNl0spM6fnAnOdCr0rz7H4NU1rk= k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA= k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/component-helpers v0.21.1/go.mod h1:FtC1flbiQlosHQrLrRUulnKxE4ajgWCGy/67fT2GRlQ= k8s.io/component-helpers v0.24.0/go.mod 
h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1959,37 +1460,25 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kubectl v0.21.1/go.mod h1:PMYR88MqESuysBM/MX+Vu4JbX/50nY4d4kny+SPEI2U= k8s.io/kubectl v0.24.0 h1:nA+WtMLVdXUs4wLogGd1mPTAesnLdBpCVgCmz3I7dXo= k8s.io/kubectl v0.24.0/go.mod h1:pdXkmCyHiRTqjYfyUJiXtbVNURhv0/Q1TyRhy2d5ic0= -k8s.io/metrics v0.21.1/go.mod h1:pyDVLsLe++FIGDBFU80NcW4xMFsuiVTWL8Zfi7+PpNo= k8s.io/metrics v0.24.0/go.mod h1:jrLlFGdKl3X+szubOXPG0Lf2aVxuV3QJcbsgVRAM6fI= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210517184530-5a248b5acedc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 
h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/cli-utils v0.26.1/go.mod h1:myCFn83XMe7vC1ZX5CEJJIY2cqsl6IxYI727mLW1mfE= -sigs.k8s.io/controller-runtime v0.9.0-beta.5.0.20210524185538-7181f1162e79/go.mod h1:rgf+cBz72pYlKXDRNhI1WFQv/S86EMUV4/ySmsEYgHk= -sigs.k8s.io/controller-runtime v0.12.1/go.mod h1:BKhxlA4l7FPK4AQcsuL4X6vZeWnKDXez/vp1Y8dxTU0= sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= sigs.k8s.io/controller-tools v0.6.2 h1:+Y8L0UsAugDipGRw8lrkPoAi6XqlQVZuf1DQHME3PgU= @@ -1998,18 +1487,13 @@ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87J sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod 
h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/kubebuilder-declarative-pattern v0.11.20220513-0.20220713231456-2072cf081b2e h1:/tLdk1g49p7jDvsVy5cZe5tZ1cADiDDjI0g98gW70LI= sigs.k8s.io/kubebuilder-declarative-pattern v0.11.20220513-0.20220713231456-2072cf081b2e/go.mod h1:0rubPtFGOhNacREDF7leoBgWg/n9rKZYi27oAQ5/VyM= -sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo= sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0= sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo= sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs= sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= @@ -2017,4 +1501,3 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= 
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/hack/terraform-overrides/container_cluster_ignore_tpuconfig.patch b/hack/terraform-overrides/container_cluster_ignore_tpuconfig.patch index 3d70a658e5..98caefcfde 100644 --- a/hack/terraform-overrides/container_cluster_ignore_tpuconfig.patch +++ b/hack/terraform-overrides/container_cluster_ignore_tpuconfig.patch @@ -1,8 +1,8 @@ diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go -index 253aed526..ab85197c0 100644 +index acb0ec0e6..50a1e8376 100755 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go -@@ -521,43 +521,11 @@ func resourceContainerCluster() *schema.Resource { +@@ -554,43 +554,11 @@ func resourceContainerCluster() *schema.Resource { }, "enable_tpu": { @@ -50,7 +50,7 @@ index 253aed526..ab85197c0 100644 }, "enable_legacy_abac": { -@@ -1658,10 +1626,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er +@@ -1707,10 +1675,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.IdentityServiceConfig = expandIdentityServiceConfig(v) } @@ -61,7 +61,7 @@ index 253aed526..ab85197c0 100644 if v, ok := d.GetOk("resource_usage_export_config"); ok { cluster.ResourceUsageExportConfig = expandResourceUsageExportConfig(v) } -@@ -3731,19 +3695,6 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig +@@ -3879,19 +3843,6 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig return mc } @@ 
-78,14 +78,14 @@ index 253aed526..ab85197c0 100644 - } -} - - func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} { - if c == nil { - return nil + func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go -index 07f11d662..277722069 100644 +index 40780dde0..3cd3e3e74 100755 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go -@@ -2527,30 +2527,6 @@ func TestAccContainerCluster_withDNSConfig(t *testing.T) { +@@ -2714,30 +2714,6 @@ func TestAccContainerCluster_withDNSConfig(t *testing.T) { }) } @@ -116,7 +116,7 @@ index 07f11d662..277722069 100644 func testAccContainerCluster_masterAuthorizedNetworksDisabled(t *testing.T, resource_name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resource_name] -@@ -5295,59 +5271,3 @@ resource "google_container_cluster" "primary" { +@@ -5581,59 +5557,3 @@ resource "google_container_cluster" "primary" { } `, name, name, name) } @@ -127,7 +127,7 @@ index 07f11d662..277722069 100644 - name = "%s" - auto_create_subnetworks = false -} -- +- -resource "google_compute_subnetwork" "container_subnetwork" { - name = google_compute_network.container_network.name - network = google_compute_network.container_network.name @@ -145,22 +145,22 @@ index 07f11d662..277722069 100644 - ip_cidr_range = "10.0.32.0/22" - } -} -- +- -resource "google_container_cluster" "with_tpu_config" { - name = "%s" - location = 
"us-central1-a" - initial_node_count = 1 -- -- +- +- - tpu_config { - enabled = true - use_service_networking = true - } -- +- - network = google_compute_network.container_network.name - subnetwork = google_compute_subnetwork.container_subnetwork.name - networking_mode = "VPC_NATIVE" -- +- - private_cluster_config { - enable_private_endpoint = true - enable_private_nodes = true @@ -168,7 +168,7 @@ index 07f11d662..277722069 100644 - } - master_authorized_networks_config { - } -- +- - ip_allocation_policy { - cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name - services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name diff --git a/hack/terraform-overrides/dataflow_job.patch b/hack/terraform-overrides/dataflow_job.patch index 1268487709..9480f705c8 100644 --- a/hack/terraform-overrides/dataflow_job.patch +++ b/hack/terraform-overrides/dataflow_job.patch @@ -13,7 +13,7 @@ # limitations under the License. 
diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go -index 6d542e022..a37861985 100644 +index dada1c0e9..24780eb4a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go @@ -1,13 +1,13 @@ @@ -68,17 +68,20 @@ index 6d542e022..a37861985 100644 func resourceDataflowJob() *schema.Resource { return &schema.Resource{ Create: resourceDataflowJobCreate, -@@ -56,9 +78,6 @@ func resourceDataflowJob() *schema.Resource { +@@ -56,12 +78,6 @@ func resourceDataflowJob() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Update: schema.DefaultTimeout(10 * time.Minute), }, - CustomizeDiff: customdiff.All( - resourceDataflowJobTypeCustomizeDiff, - ), +- Importer: &schema.ResourceImporter{ +- State: schema.ImportStatePassthrough, +- }, Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, -@@ -166,7 +185,7 @@ func resourceDataflowJob() *schema.Resource { +@@ -169,7 +185,7 @@ func resourceDataflowJob() *schema.Resource { "subnetwork": { Type: schema.TypeString, Optional: true, @@ -87,7 +90,7 @@ index 6d542e022..a37861985 100644 Description: `The subnetwork to which VMs will be assigned. 
Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, }, -@@ -221,30 +240,6 @@ func resourceDataflowJob() *schema.Resource { +@@ -225,30 +241,6 @@ func resourceDataflowJob() *schema.Resource { } } @@ -118,7 +121,7 @@ index 6d542e022..a37861985 100644 // return true if a job is in a terminal state, OR if a job is in a // terminating state and skipWait is true func shouldStopDataflowJobDeleteQuery(state string, skipWait bool) bool { -@@ -286,11 +281,18 @@ func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { +@@ -290,11 +282,18 @@ func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { Environment: &env, } @@ -138,7 +141,7 @@ index 6d542e022..a37861985 100644 return resourceDataflowJobRead(d, meta) } -@@ -312,10 +314,22 @@ func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { +@@ -316,10 +315,22 @@ func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { return err } @@ -164,7 +167,7 @@ index 6d542e022..a37861985 100644 return handleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) } -@@ -364,13 +378,17 @@ func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { +@@ -368,13 +379,17 @@ func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("additional_experiments", optionsMap["experiments"]); err != nil { return fmt.Errorf("Error setting additional_experiments: %s", err) } @@ -187,7 +190,7 @@ index 6d542e022..a37861985 100644 return nil } -@@ -382,6 +400,10 @@ func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interfa +@@ -386,6 +401,10 @@ func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interfa return nil } @@ -198,7 +201,7 @@ index 6d542e022..a37861985 100644 config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { -@@ -424,10 +446,10 @@ func resourceDataflowJobUpdateByReplacement(d 
*schema.ResourceData, meta interfa +@@ -428,10 +447,10 @@ func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interfa } if err := waitForDataflowJobToBeUpdated(d, config, response.Job.Id, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -211,7 +214,7 @@ index 6d542e022..a37861985 100644 return resourceDataflowJobRead(d, meta) } -@@ -449,7 +471,12 @@ func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { +@@ -453,7 +472,12 @@ func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { return err } @@ -225,7 +228,7 @@ index 6d542e022..a37861985 100644 requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string)) if err != nil { -@@ -511,7 +538,7 @@ func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { +@@ -515,7 +539,7 @@ func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { d.SetId("") return nil } @@ -234,7 +237,7 @@ index 6d542e022..a37861985 100644 } func resourceDataflowJobMapRequestedState(policy string) (string, error) { -@@ -532,6 +559,34 @@ func resourceDataflowJobCreateJob(config *Config, project, region, userAgent str +@@ -536,6 +560,34 @@ func resourceDataflowJobCreateJob(config *Config, project, region, userAgent str return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Create(project, region, request).Do() } @@ -269,7 +272,7 @@ index 6d542e022..a37861985 100644 func resourceDataflowJobGetJob(config *Config, project, region, userAgent string, id string) (*dataflow.Job, error) { if region == "" { return config.NewDataflowClient(userAgent).Projects.Jobs.Get(project, id).View("JOB_VIEW_ALL").Do() -@@ -556,6 +611,11 @@ func resourceDataflowJobLaunchTemplate(config *Config, project, region, userAgen +@@ -560,6 +612,11 @@ func resourceDataflowJobLaunchTemplate(config *Config, project, region, userAgen func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *Config) 
(dataflow.RuntimeEnvironment, error) { zone, _ := getZone(d, config) @@ -281,7 +284,7 @@ index 6d542e022..a37861985 100644 labels := expandStringMap(d, "labels") additionalExperiments := convertStringSet(d.Get("additional_experiments").(*schema.Set)) -@@ -564,7 +624,7 @@ func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *Config) (datafl +@@ -568,7 +625,7 @@ func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *Config) (datafl MaxWorkers: int64(d.Get("max_workers").(int)), Network: d.Get("network").(string), ServiceAccountEmail: d.Get("service_account_email").(string), @@ -290,7 +293,7 @@ index 6d542e022..a37861985 100644 TempLocation: d.Get("temp_gcs_location").(string), MachineType: d.Get("machine_type").(string), KmsKeyName: d.Get("kms_key_name").(string), -@@ -655,3 +715,29 @@ func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *Config, repla +@@ -659,3 +716,29 @@ func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *Config, repla } }) } diff --git a/pkg/clients/generated/apis/compute/v1beta1/computebackendbucket_types.go b/pkg/clients/generated/apis/compute/v1beta1/computebackendbucket_types.go index c5b3a15a7d..a678573eb3 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computebackendbucket_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computebackendbucket_types.go @@ -35,6 +35,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type BackendbucketBypassCacheOnRequestHeaders struct { + /* The header field name to match on when bypassing cache. Values are case-insensitive. */ + // +optional + HeaderName *string `json:"headerName,omitempty"` +} + type BackendbucketCacheKeyPolicy struct { /* Allows HTTP request headers (by name) to be used in the cache key. */ @@ -49,6 +55,10 @@ type BackendbucketCacheKeyPolicy struct { } type BackendbucketCdnPolicy struct { + /* Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. 
Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. */ + // +optional + BypassCacheOnRequestHeaders []BackendbucketBypassCacheOnRequestHeaders `json:"bypassCacheOnRequestHeaders,omitempty"` + /* The CacheKeyPolicy for this CdnPolicy. */ // +optional CacheKeyPolicy *BackendbucketCacheKeyPolicy `json:"cacheKeyPolicy,omitempty"` @@ -80,6 +90,10 @@ type BackendbucketCdnPolicy struct { // +optional NegativeCachingPolicy []BackendbucketNegativeCachingPolicy `json:"negativeCachingPolicy,omitempty"` + /* If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. */ + // +optional + RequestCoalescing *bool `json:"requestCoalescing,omitempty"` + /* Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. */ // +optional ServeWhileStale *int `json:"serveWhileStale,omitempty"` diff --git a/pkg/clients/generated/apis/compute/v1beta1/computebackendservice_types.go b/pkg/clients/generated/apis/compute/v1beta1/computebackendservice_types.go index a144acfa8e..b0c246f3fb 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computebackendservice_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computebackendservice_types.go @@ -40,7 +40,10 @@ type BackendserviceBackend struct { For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) - and CONNECTION (for TCP/SSL). Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]. */ + and CONNECTION (for TCP/SSL). + + See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) + for an explanation of load balancing modes. Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]. 
*/ // +optional BalancingMode *string `json:"balancingMode,omitempty"` diff --git a/pkg/clients/generated/apis/compute/v1beta1/computeinstance_types.go b/pkg/clients/generated/apis/compute/v1beta1/computeinstance_types.go index 112ef2f725..d1f1d9fad1 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computeinstance_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computeinstance_types.go @@ -275,6 +275,10 @@ type InstanceScheduling struct { // +optional AutomaticRestart *bool `json:"automaticRestart,omitempty"` + /* Specifies the action GCE should take when SPOT VM is preempted. */ + // +optional + InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty"` + /* */ // +optional MinNodeCpus *int `json:"minNodeCpus,omitempty"` diff --git a/pkg/clients/generated/apis/compute/v1beta1/computeinstancetemplate_types.go b/pkg/clients/generated/apis/compute/v1beta1/computeinstancetemplate_types.go index 77e08ae045..4b2fa6f439 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computeinstancetemplate_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computeinstancetemplate_types.go @@ -244,6 +244,10 @@ type InstancetemplateScheduling struct { // +optional AutomaticRestart *bool `json:"automaticRestart,omitempty"` + /* Immutable. Specifies the action GCE should take when SPOT VM is preempted. */ + // +optional + InstanceTerminationAction *string `json:"instanceTerminationAction,omitempty"` + /* Minimum number of cpus for the instance. 
*/ // +optional MinNodeCpus *int `json:"minNodeCpus,omitempty"` diff --git a/pkg/clients/generated/apis/compute/v1beta1/computenetworkendpointgroup_types.go b/pkg/clients/generated/apis/compute/v1beta1/computenetworkendpointgroup_types.go index fed9dc93a2..dac0d2c7d6 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computenetworkendpointgroup_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computenetworkendpointgroup_types.go @@ -55,7 +55,9 @@ type ComputeNetworkEndpointGroupSpec struct { Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or - CONNECTION balancing modes. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]. */ + CONNECTION balancing modes. + + Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]. */ // +optional NetworkEndpointType *string `json:"networkEndpointType,omitempty"` diff --git a/pkg/clients/generated/apis/compute/v1beta1/computeregionnetworkendpointgroup_types.go b/pkg/clients/generated/apis/compute/v1beta1/computeregionnetworkendpointgroup_types.go index 799c35f75a..aeec6bfc5e 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computeregionnetworkendpointgroup_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computeregionnetworkendpointgroup_types.go @@ -103,6 +103,12 @@ type ComputeRegionNetworkEndpointGroupSpec struct { // +optional NetworkEndpointType *string `json:"networkEndpointType,omitempty"` + /* Immutable. This field is only used for PSC. + The URL of the network to which all network endpoints in the NEG belong. Uses + "default" project network if unspecified. 
*/ + // +optional + NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"` + /* Immutable. The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. */ // +optional @@ -114,6 +120,11 @@ type ComputeRegionNetworkEndpointGroupSpec struct { /* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */ // +optional ResourceID *string `json:"resourceID,omitempty"` + + /* Immutable. This field is only used for PSC. + Optional URL of the subnetwork to which all network endpoints in the NEG belong. */ + // +optional + SubnetworkRef *v1alpha1.ResourceRef `json:"subnetworkRef,omitempty"` } type ComputeRegionNetworkEndpointGroupStatus struct { diff --git a/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go b/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go index 4eb1975050..ec0f1e7dd2 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go +++ b/pkg/clients/generated/apis/compute/v1beta1/computetargethttpsproxy_types.go @@ -36,6 +36,14 @@ import ( ) type ComputeTargetHTTPSProxySpec struct { + /* Only the `external` field is supported to configure the reference. + + A reference to the CertificateMap resource uri that identifies a + certificate map associated with the given target proxy. This field + can only be set for global target proxies. */ + // +optional + CertificateMapRef *v1alpha1.ResourceRef `json:"certificateMapRef,omitempty"` + /* Immutable. An optional description of this resource. 
*/ // +optional Description *string `json:"description,omitempty"` @@ -61,7 +69,8 @@ type ComputeTargetHTTPSProxySpec struct { ResourceID *string `json:"resourceID,omitempty"` /* */ - SslCertificates []v1alpha1.ResourceRef `json:"sslCertificates"` + // +optional + SslCertificates []v1alpha1.ResourceRef `json:"sslCertificates,omitempty"` /* A reference to the ComputeSSLPolicy resource that will be associated with the ComputeTargetHTTPSProxy resource. If not set, diff --git a/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go index e69e368cc6..43afdb66de 100644 --- a/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/compute/v1beta1/zz_generated.deepcopy.go @@ -29,6 +29,27 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendbucketBypassCacheOnRequestHeaders) DeepCopyInto(out *BackendbucketBypassCacheOnRequestHeaders) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendbucketBypassCacheOnRequestHeaders. +func (in *BackendbucketBypassCacheOnRequestHeaders) DeepCopy() *BackendbucketBypassCacheOnRequestHeaders { + if in == nil { + return nil + } + out := new(BackendbucketBypassCacheOnRequestHeaders) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackendbucketCacheKeyPolicy) DeepCopyInto(out *BackendbucketCacheKeyPolicy) { *out = *in @@ -58,6 +79,13 @@ func (in *BackendbucketCacheKeyPolicy) DeepCopy() *BackendbucketCacheKeyPolicy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackendbucketCdnPolicy) DeepCopyInto(out *BackendbucketCdnPolicy) { *out = *in + if in.BypassCacheOnRequestHeaders != nil { + in, out := &in.BypassCacheOnRequestHeaders, &out.BypassCacheOnRequestHeaders + *out = make([]BackendbucketBypassCacheOnRequestHeaders, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CacheKeyPolicy != nil { in, out := &in.CacheKeyPolicy, &out.CacheKeyPolicy *out = new(BackendbucketCacheKeyPolicy) @@ -95,6 +123,11 @@ func (in *BackendbucketCdnPolicy) DeepCopyInto(out *BackendbucketCdnPolicy) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.RequestCoalescing != nil { + in, out := &in.RequestCoalescing, &out.RequestCoalescing + *out = new(bool) + **out = **in + } if in.ServeWhileStale != nil { in, out := &in.ServeWhileStale, &out.ServeWhileStale *out = new(int) @@ -4889,6 +4922,11 @@ func (in *ComputeRegionNetworkEndpointGroupSpec) DeepCopyInto(out *ComputeRegion *out = new(string) **out = **in } + if in.NetworkRef != nil { + in, out := &in.NetworkRef, &out.NetworkRef + *out = new(v1alpha1.ResourceRef) + **out = **in + } if in.PscTargetService != nil { in, out := &in.PscTargetService, &out.PscTargetService *out = new(string) @@ -4899,6 +4937,11 @@ func (in *ComputeRegionNetworkEndpointGroupSpec) DeepCopyInto(out *ComputeRegion *out = new(string) **out = **in } + if in.SubnetworkRef != nil { + in, out := &in.SubnetworkRef, &out.SubnetworkRef + *out = new(v1alpha1.ResourceRef) + **out = **in + } return } @@ -7142,6 +7185,11 @@ func (in *ComputeTargetHTTPSProxyList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *ComputeTargetHTTPSProxySpec) DeepCopyInto(out *ComputeTargetHTTPSProxySpec) { *out = *in + if in.CertificateMapRef != nil { + in, out := &in.CertificateMapRef, &out.CertificateMapRef + *out = new(v1alpha1.ResourceRef) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -9500,6 +9548,11 @@ func (in *InstanceScheduling) DeepCopyInto(out *InstanceScheduling) { *out = new(bool) **out = **in } + if in.InstanceTerminationAction != nil { + in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction + *out = new(string) + **out = **in + } if in.MinNodeCpus != nil { in, out := &in.MinNodeCpus, &out.MinNodeCpus *out = new(int) @@ -10622,6 +10675,11 @@ func (in *InstancetemplateScheduling) DeepCopyInto(out *InstancetemplateScheduli *out = new(bool) **out = **in } + if in.InstanceTerminationAction != nil { + in, out := &in.InstanceTerminationAction, &out.InstanceTerminationAction + *out = new(string) + **out = **in + } if in.MinNodeCpus != nil { in, out := &in.MinNodeCpus, &out.MinNodeCpus *out = new(int) diff --git a/pkg/clients/generated/apis/container/v1beta1/containercluster_types.go b/pkg/clients/generated/apis/container/v1beta1/containercluster_types.go index bf5e799571..3ced96565e 100644 --- a/pkg/clients/generated/apis/container/v1beta1/containercluster_types.go +++ b/pkg/clients/generated/apis/container/v1beta1/containercluster_types.go @@ -48,7 +48,7 @@ type ClusterAddonsConfig struct { // +optional DnsCacheConfig *ClusterDnsCacheConfig `json:"dnsCacheConfig,omitempty"` - /* Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to disabled; set enabled = true to enable. */ + /* Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to enabled; set disabled = true to disable. 
*/ // +optional GcePersistentDiskCsiDriverConfig *ClusterGcePersistentDiskCsiDriverConfig `json:"gcePersistentDiskCsiDriverConfig,omitempty"` @@ -82,11 +82,16 @@ type ClusterAddonsConfig struct { } type ClusterAuthenticatorGroupsConfig struct { - /* Immutable. The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com. */ + /* The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com. */ SecurityGroup string `json:"securityGroup"` } type ClusterAutoProvisioningDefaults struct { + /* Immutable. The Customer Managed Encryption Key used to encrypt the + boot disk attached to each node in the node pool. */ + // +optional + BootDiskKMSKeyRef *v1alpha1.ResourceRef `json:"bootDiskKMSKeyRef,omitempty"` + /* The default image type used by NAP once a new node pool is being created. */ // +optional ImageType *string `json:"imageType,omitempty"` @@ -109,6 +114,16 @@ type ClusterBigqueryDestination struct { DatasetId string `json:"datasetId"` } +type ClusterBinaryAuthorization struct { + /* DEPRECATED. Deprecated in favor of evaluation_mode. Enable Binary Authorization for this cluster. */ + // +optional + Enabled *bool `json:"enabled,omitempty"` + + /* Mode of operation for Binary Authorization policy evaluation. */ + // +optional + EvaluationMode *string `json:"evaluationMode,omitempty"` +} + type ClusterCidrBlocks struct { /* External network that can access Kubernetes master through HTTPS. Must be specified in CIDR notation. */ CidrBlock string `json:"cidrBlock"` @@ -395,8 +410,13 @@ type ClusterMasterGlobalAccessConfig struct { Enabled bool `json:"enabled"` } +type ClusterMeshCertificates struct { + /* When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster. 
*/ + EnableCertificates bool `json:"enableCertificates"` +} + type ClusterMonitoringConfig struct { - /* GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS and WORKLOADS. */ + /* GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS. */ // +optional EnableComponents []string `json:"enableComponents,omitempty"` @@ -544,10 +564,10 @@ type ClusterPodSecurityPolicyConfig struct { } type ClusterPrivateClusterConfig struct { - /* Immutable. Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking. */ + /* Immutable. When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true. */ EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` - /* Immutable. When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true. */ + /* Immutable. Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking. */ // +optional EnablePrivateNodes *bool `json:"enablePrivateNodes,omitempty"` @@ -690,10 +710,14 @@ type ContainerClusterSpec struct { // +optional AddonsConfig *ClusterAddonsConfig `json:"addonsConfig,omitempty"` - /* Immutable. Configuration for the Google Groups for GKE feature. */ + /* Configuration for the Google Groups for GKE feature. 
*/ // +optional AuthenticatorGroupsConfig *ClusterAuthenticatorGroupsConfig `json:"authenticatorGroupsConfig,omitempty"` + /* Configuration options for the Binary Authorization feature. */ + // +optional + BinaryAuthorization *ClusterBinaryAuthorization `json:"binaryAuthorization,omitempty"` + /* Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs of the cluster's workload. See the guide to using Node Auto-Provisioning for more details. */ // +optional ClusterAutoscaling *ClusterClusterAutoscaling `json:"clusterAutoscaling,omitempty"` @@ -738,7 +762,7 @@ type ContainerClusterSpec struct { // +optional EnableAutopilot *bool `json:"enableAutopilot,omitempty"` - /* Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binary Authorization. */ + /* DEPRECATED. Deprecated in favor of binary_authorization. Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binary Authorization. */ // +optional EnableBinaryAuthorization *bool `json:"enableBinaryAuthorization,omitempty"` @@ -801,6 +825,10 @@ type ContainerClusterSpec struct { // +optional MasterAuthorizedNetworksConfig *ClusterMasterAuthorizedNetworksConfig `json:"masterAuthorizedNetworksConfig,omitempty"` + /* If set, and enable_certificates=true, the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster. */ + // +optional + MeshCertificates *ClusterMeshCertificates `json:"meshCertificates,omitempty"` + /* The minimum version of the master. GKE will auto-update the master to new versions, so this does not guarantee the current master version--use the read-only master_version field to obtain that. If unset, the cluster's version will be set by GKE to the version of the most recent official release (which is not necessarily the latest version). 
*/ // +optional MinMasterVersion *string `json:"minMasterVersion,omitempty"` diff --git a/pkg/clients/generated/apis/container/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/container/v1beta1/zz_generated.deepcopy.go index b546777fb9..02b6915769 100644 --- a/pkg/clients/generated/apis/container/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/container/v1beta1/zz_generated.deepcopy.go @@ -119,6 +119,11 @@ func (in *ClusterAuthenticatorGroupsConfig) DeepCopy() *ClusterAuthenticatorGrou // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterAutoProvisioningDefaults) DeepCopyInto(out *ClusterAutoProvisioningDefaults) { *out = *in + if in.BootDiskKMSKeyRef != nil { + in, out := &in.BootDiskKMSKeyRef, &out.BootDiskKMSKeyRef + *out = new(v1alpha1.ResourceRef) + **out = **in + } if in.ImageType != nil { in, out := &in.ImageType, &out.ImageType *out = new(string) @@ -168,6 +173,32 @@ func (in *ClusterBigqueryDestination) DeepCopy() *ClusterBigqueryDestination { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBinaryAuthorization) DeepCopyInto(out *ClusterBinaryAuthorization) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EvaluationMode != nil { + in, out := &in.EvaluationMode, &out.EvaluationMode + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBinaryAuthorization. +func (in *ClusterBinaryAuthorization) DeepCopy() *ClusterBinaryAuthorization { + if in == nil { + return nil + } + out := new(ClusterBinaryAuthorization) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterCidrBlocks) DeepCopyInto(out *ClusterCidrBlocks) { *out = *in @@ -896,6 +927,22 @@ func (in *ClusterMasterGlobalAccessConfig) DeepCopy() *ClusterMasterGlobalAccess return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMeshCertificates) DeepCopyInto(out *ClusterMeshCertificates) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMeshCertificates. +func (in *ClusterMeshCertificates) DeepCopy() *ClusterMeshCertificates { + if in == nil { + return nil + } + out := new(ClusterMeshCertificates) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterMonitoringConfig) DeepCopyInto(out *ClusterMonitoringConfig) { *out = *in @@ -1538,6 +1585,11 @@ func (in *ContainerClusterSpec) DeepCopyInto(out *ContainerClusterSpec) { *out = new(ClusterAuthenticatorGroupsConfig) **out = **in } + if in.BinaryAuthorization != nil { + in, out := &in.BinaryAuthorization, &out.BinaryAuthorization + *out = new(ClusterBinaryAuthorization) + (*in).DeepCopyInto(*out) + } if in.ClusterAutoscaling != nil { in, out := &in.ClusterAutoscaling, &out.ClusterAutoscaling *out = new(ClusterClusterAutoscaling) @@ -1668,6 +1720,11 @@ func (in *ContainerClusterSpec) DeepCopyInto(out *ContainerClusterSpec) { *out = new(ClusterMasterAuthorizedNetworksConfig) (*in).DeepCopyInto(*out) } + if in.MeshCertificates != nil { + in, out := &in.MeshCertificates, &out.MeshCertificates + *out = new(ClusterMeshCertificates) + **out = **in + } if in.MinMasterVersion != nil { in, out := &in.MinMasterVersion, &out.MinMasterVersion *out = new(string) diff --git a/pkg/clients/generated/apis/dataproc/v1beta1/dataprocworkflowtemplate_types.go b/pkg/clients/generated/apis/dataproc/v1beta1/dataprocworkflowtemplate_types.go index 
ffa745e2ab..beac45c7b3 100644 --- a/pkg/clients/generated/apis/dataproc/v1beta1/dataprocworkflowtemplate_types.go +++ b/pkg/clients/generated/apis/dataproc/v1beta1/dataprocworkflowtemplate_types.go @@ -554,7 +554,7 @@ type WorkflowtemplatePysparkJob struct { } type WorkflowtemplateQueryList struct { - /* Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } */ + /* Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } */ Queries []string `json:"queries"` } diff --git a/pkg/clients/generated/apis/gkehub/v1beta1/gkehubmembership_types.go b/pkg/clients/generated/apis/gkehub/v1beta1/gkehubmembership_types.go index 25b4254b11..8137276b3e 100644 --- a/pkg/clients/generated/apis/gkehub/v1beta1/gkehubmembership_types.go +++ b/pkg/clients/generated/apis/gkehub/v1beta1/gkehubmembership_types.go @@ -110,7 +110,7 @@ type MembershipAuthorityStatus struct { /* Output only. An identity provider that reflects the `issuer` in the workload identity pool. */ IdentityProvider string `json:"identityProvider,omitempty"` - /* Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in: {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API. 
*/ + /* Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API. */ WorkloadIdentityPool string `json:"workloadIdentityPool,omitempty"` } diff --git a/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformoauthidpconfig_types.go b/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformoauthidpconfig_types.go index 008c849936..f0407fcb8e 100644 --- a/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformoauthidpconfig_types.go +++ b/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformoauthidpconfig_types.go @@ -90,7 +90,7 @@ type IdentityPlatformOAuthIDPConfigSpec struct { // +optional ResourceID *string `json:"resourceID,omitempty"` - /* The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g.: {id\_token, token}). */ + /* The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g. {id\_token, token}). 
*/ // +optional ResponseType *OauthidpconfigResponseType `json:"responseType,omitempty"` } diff --git a/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformtenantoauthidpconfig_types.go b/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformtenantoauthidpconfig_types.go index bae4e999d4..25063f96e5 100644 --- a/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformtenantoauthidpconfig_types.go +++ b/pkg/clients/generated/apis/identityplatform/v1beta1/identityplatformtenantoauthidpconfig_types.go @@ -90,7 +90,7 @@ type IdentityPlatformTenantOAuthIDPConfigSpec struct { // +optional ResourceID *string `json:"resourceID,omitempty"` - /* The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g.: {id\_token, token}). */ + /* The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g. {id\_token, token}). */ // +optional ResponseType *TenantoauthidpconfigResponseType `json:"responseType,omitempty"` diff --git a/pkg/clients/generated/apis/kms/v1beta1/kmscryptokey_types.go b/pkg/clients/generated/apis/kms/v1beta1/kmscryptokey_types.go index f4e2350f0f..a11a64d105 100644 --- a/pkg/clients/generated/apis/kms/v1beta1/kmscryptokey_types.go +++ b/pkg/clients/generated/apis/kms/v1beta1/kmscryptokey_types.go @@ -40,7 +40,7 @@ type CryptokeyVersionTemplate struct { See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs. */ Algorithm string `json:"algorithm"` - /* Immutable. The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL". Defaults to "SOFTWARE". */ + /* Immutable. The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE". 
*/ // +optional ProtectionLevel *string `json:"protectionLevel,omitempty"` } @@ -60,7 +60,7 @@ type KMSCryptoKeySpec struct { /* Immutable. The immutable purpose of this CryptoKey. See the [purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) - for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"]. */ + for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC"]. */ // +optional Purpose *string `json:"purpose,omitempty"` diff --git a/pkg/clients/generated/apis/logging/v1beta1/logginglogmetric_types.go b/pkg/clients/generated/apis/logging/v1beta1/logginglogmetric_types.go index c73e8fc1a9..5993b5d20b 100644 --- a/pkg/clients/generated/apis/logging/v1beta1/logginglogmetric_types.go +++ b/pkg/clients/generated/apis/logging/v1beta1/logginglogmetric_types.go @@ -128,7 +128,7 @@ type LogmetricMetricDescriptor struct { // +optional MetricKind *string `json:"metricKind,omitempty"` - /* The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. 
Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. 
It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent"). */ + /* The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). 
The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). 
Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent"). */ // +optional Unit *string `json:"unit,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringalertpolicy_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringalertpolicy_types.go index 163446343a..59f40badf6 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringalertpolicy_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringalertpolicy_types.go @@ -211,6 +211,12 @@ type AlertpolicyConditionMonitoringQueryLanguage struct { alerted on quickly. */ Duration string `json:"duration"` + /* A condition control that determines how + metric-threshold conditions are evaluated when + data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]. */ + // +optional + EvaluationMissingData *string `json:"evaluationMissingData,omitempty"` + /* Monitoring Query Language query that outputs a boolean stream. */ Query string `json:"query"` @@ -307,6 +313,12 @@ type AlertpolicyConditionThreshold struct { alerted on quickly. */ Duration string `json:"duration"` + /* A condition control that determines how + metric-threshold conditions are evaluated when + data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]. 
*/ + // +optional + EvaluationMissingData *string `json:"evaluationMissingData,omitempty"` + /* A filter that identifies which time series should be compared with the threshold.The filter is similar to the one that is diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go index 4f9303bdc1..caedc2413e 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringdashboard_types.go @@ -192,7 +192,7 @@ type DashboardScorecard struct { // +optional SparkChartView *DashboardSparkChartView `json:"sparkChartView,omitempty"` - /* The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state. */ + /* The thresholds used to determine the state of the scorecard given the time series' current value. 
For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state. */ // +optional Thresholds []DashboardThresholds `json:"thresholds,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringmetricdescriptor_types.go b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringmetricdescriptor_types.go index 5dbbefc224..07933f6bf8 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/monitoringmetricdescriptor_types.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/monitoringmetricdescriptor_types.go @@ -93,7 +93,7 @@ type MonitoringMetricDescriptorSpec struct { /* Immutable. The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: "custom.googleapis.com/invoice/paid/amount" "external.googleapis.com/prometheus/up" "appengine.googleapis.com/http/server/response_latencies" */ Type string `json:"type"` - /* Immutable. 
The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent"). */ + /* Immutable. The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. 
Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { "." 
Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent"). */ // +optional Unit *string `json:"unit,omitempty"` diff --git a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go index c79ed9f8f9..c33af6f5a5 100644 --- a/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -150,6 +150,11 @@ func (in *AlertpolicyConditionMatchedLog) DeepCopy() *AlertpolicyConditionMatche // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AlertpolicyConditionMonitoringQueryLanguage) DeepCopyInto(out *AlertpolicyConditionMonitoringQueryLanguage) { *out = *in + if in.EvaluationMissingData != nil { + in, out := &in.EvaluationMissingData, &out.EvaluationMissingData + *out = new(string) + **out = **in + } if in.Trigger != nil { in, out := &in.Trigger, &out.Trigger *out = new(AlertpolicyTrigger) @@ -190,6 +195,11 @@ func (in *AlertpolicyConditionThreshold) DeepCopyInto(out *AlertpolicyConditionT *out = new(string) **out = **in } + if in.EvaluationMissingData != nil { + in, out := &in.EvaluationMissingData, &out.EvaluationMissingData + *out = new(string) + **out = **in + } if in.Filter != nil { in, out := &in.Filter, &out.Filter *out = new(string) diff --git a/pkg/clients/generated/apis/osconfig/v1beta1/osconfigguestpolicy_types.go b/pkg/clients/generated/apis/osconfig/v1beta1/osconfigguestpolicy_types.go index 88aa9108dc..12263be2b9 100644 --- a/pkg/clients/generated/apis/osconfig/v1beta1/osconfigguestpolicy_types.go +++ b/pkg/clients/generated/apis/osconfig/v1beta1/osconfigguestpolicy_types.go @@ -300,7 +300,7 @@ type GuestpolicyRemote struct { // +optional Checksum *string `json:"checksum,omitempty"` - /* URI from which to fetch the object. It should contain both the protocol and path following the format: {protocol}://{location}. */ + /* URI from which to fetch the object. It should contain both the protocol and path following the format {protocol}://{location}. 
*/ // +optional Uri *string `json:"uri,omitempty"` } diff --git a/pkg/clients/generated/apis/pubsub/v1beta1/pubsubsubscription_types.go b/pkg/clients/generated/apis/pubsub/v1beta1/pubsubsubscription_types.go index 982bbcc415..893b9c1a51 100644 --- a/pkg/clients/generated/apis/pubsub/v1beta1/pubsubsubscription_types.go +++ b/pkg/clients/generated/apis/pubsub/v1beta1/pubsubsubscription_types.go @@ -35,6 +35,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type SubscriptionBigqueryConfig struct { + /* When true and useTopicSchema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. + Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. */ + // +optional + DropUnknownFields *bool `json:"dropUnknownFields,omitempty"` + + /* The name of the table to which to write data. */ + TableRef v1alpha1.ResourceRef `json:"tableRef"` + + /* When true, use the topic's schema as the columns to write to in BigQuery, if it exists. */ + // +optional + UseTopicSchema *bool `json:"useTopicSchema,omitempty"` + + /* When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. + The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. */ + // +optional + WriteMetadata *bool `json:"writeMetadata,omitempty"` +} + type SubscriptionDeadLetterPolicy struct { /* */ // +optional @@ -154,6 +173,12 @@ type PubSubSubscriptionSpec struct { // +optional AckDeadlineSeconds *int `json:"ackDeadlineSeconds,omitempty"` + /* If delivery to BigQuery is used with this subscription, this field is used to configure it. + Either pushConfig or bigQueryConfig can be set, but not both. 
+ If both are empty, then the subscriber will pull and ack messages using API methods. */ + // +optional + BigqueryConfig *SubscriptionBigqueryConfig `json:"bigqueryConfig,omitempty"` + /* A policy that specifies the conditions for dead lettering messages in this subscription. If dead_letter_policy is not set, dead lettering is disabled. diff --git a/pkg/clients/generated/apis/pubsub/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/pubsub/v1beta1/zz_generated.deepcopy.go index 15446ee7e8..3cc62f58fc 100644 --- a/pkg/clients/generated/apis/pubsub/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/pubsub/v1beta1/zz_generated.deepcopy.go @@ -212,6 +212,11 @@ func (in *PubSubSubscriptionSpec) DeepCopyInto(out *PubSubSubscriptionSpec) { *out = new(int) **out = **in } + if in.BigqueryConfig != nil { + in, out := &in.BigqueryConfig, &out.BigqueryConfig + *out = new(SubscriptionBigqueryConfig) + (*in).DeepCopyInto(*out) + } if in.DeadLetterPolicy != nil { in, out := &in.DeadLetterPolicy, &out.DeadLetterPolicy *out = new(SubscriptionDeadLetterPolicy) @@ -420,6 +425,38 @@ func (in *PubSubTopicStatus) DeepCopy() *PubSubTopicStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionBigqueryConfig) DeepCopyInto(out *SubscriptionBigqueryConfig) { + *out = *in + if in.DropUnknownFields != nil { + in, out := &in.DropUnknownFields, &out.DropUnknownFields + *out = new(bool) + **out = **in + } + out.TableRef = in.TableRef + if in.UseTopicSchema != nil { + in, out := &in.UseTopicSchema, &out.UseTopicSchema + *out = new(bool) + **out = **in + } + if in.WriteMetadata != nil { + in, out := &in.WriteMetadata, &out.WriteMetadata + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionBigqueryConfig. 
+func (in *SubscriptionBigqueryConfig) DeepCopy() *SubscriptionBigqueryConfig { + if in == nil { + return nil + } + out := new(SubscriptionBigqueryConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SubscriptionDeadLetterPolicy) DeepCopyInto(out *SubscriptionDeadLetterPolicy) { *out = *in diff --git a/pkg/clients/generated/apis/redis/v1beta1/redisinstance_types.go b/pkg/clients/generated/apis/redis/v1beta1/redisinstance_types.go index b2e42df9f6..00134776f8 100644 --- a/pkg/clients/generated/apis/redis/v1beta1/redisinstance_types.go +++ b/pkg/clients/generated/apis/redis/v1beta1/redisinstance_types.go @@ -153,6 +153,12 @@ type RedisInstanceSpec struct { // +optional ConnectMode *string `json:"connectMode,omitempty"` + /* Immutable. Optional. The KMS key reference that you want to use to + encrypt the data at rest for this Redis instance. If this is + provided, CMEK is enabled. */ + // +optional + CustomerManagedKeyRef *v1alpha1.ResourceRef `json:"customerManagedKeyRef,omitempty"` + /* An arbitrary and optional user-provided name for the instance. 
*/ // +optional DisplayName *string `json:"displayName,omitempty"` diff --git a/pkg/clients/generated/apis/redis/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/redis/v1beta1/zz_generated.deepcopy.go index c65de815ed..f938c5ddcd 100644 --- a/pkg/clients/generated/apis/redis/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/redis/v1beta1/zz_generated.deepcopy.go @@ -277,6 +277,11 @@ func (in *RedisInstanceSpec) DeepCopyInto(out *RedisInstanceSpec) { *out = new(string) **out = **in } + if in.CustomerManagedKeyRef != nil { + in, out := &in.CustomerManagedKeyRef, &out.CustomerManagedKeyRef + *out = new(v1alpha1.ResourceRef) + **out = **in + } if in.DisplayName != nil { in, out := &in.DisplayName, &out.DisplayName *out = new(string) diff --git a/pkg/clients/generated/apis/spanner/v1beta1/spannerdatabase_types.go b/pkg/clients/generated/apis/spanner/v1beta1/spannerdatabase_types.go index 8e26cad85b..143f73fa25 100644 --- a/pkg/clients/generated/apis/spanner/v1beta1/spannerdatabase_types.go +++ b/pkg/clients/generated/apis/spanner/v1beta1/spannerdatabase_types.go @@ -42,7 +42,8 @@ type DatabaseEncryptionConfig struct { } type SpannerDatabaseSpec struct { - /* */ + /* Immutable. The dialect of the Cloud Spanner Database. + If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]. */ // +optional DatabaseDialect *string `json:"databaseDialect,omitempty"` @@ -63,6 +64,14 @@ type SpannerDatabaseSpec struct { /* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */ // +optional ResourceID *string `json:"resourceID,omitempty"` + + /* The retention period for the database. The retention period must be between 1 hour + and 7 days, and can be specified in days, hours, minutes, or seconds. For example, + the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. 
+ If this property is used, you must avoid adding new DDL statements to 'ddl' that + update the database's version_retention_period. */ + // +optional + VersionRetentionPeriod *string `json:"versionRetentionPeriod,omitempty"` } type SpannerDatabaseStatus struct { diff --git a/pkg/clients/generated/apis/spanner/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/spanner/v1beta1/zz_generated.deepcopy.go index a0796ef614..32742c02b8 100644 --- a/pkg/clients/generated/apis/spanner/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/spanner/v1beta1/zz_generated.deepcopy.go @@ -131,6 +131,11 @@ func (in *SpannerDatabaseSpec) DeepCopyInto(out *SpannerDatabaseSpec) { *out = new(string) **out = **in } + if in.VersionRetentionPeriod != nil { + in, out := &in.VersionRetentionPeriod, &out.VersionRetentionPeriod + *out = new(string) + **out = **in + } return } diff --git a/pkg/clients/generated/apis/sql/v1beta1/sqlinstance_types.go b/pkg/clients/generated/apis/sql/v1beta1/sqlinstance_types.go index 7c5bc989d1..11cacde30b 100644 --- a/pkg/clients/generated/apis/sql/v1beta1/sqlinstance_types.go +++ b/pkg/clients/generated/apis/sql/v1beta1/sqlinstance_types.go @@ -145,6 +145,10 @@ type InstanceLocationPreference struct { // +optional FollowGaeApplication *string `json:"followGaeApplication,omitempty"` + /* The preferred Compute Engine zone for the secondary/failover. */ + // +optional + SecondaryZone *string `json:"secondaryZone,omitempty"` + /* The preferred compute engine zone. */ // +optional Zone *string `json:"zone,omitempty"` @@ -174,6 +178,31 @@ type InstancePassword struct { ValueFrom *InstanceValueFrom `json:"valueFrom,omitempty"` } +type InstancePasswordValidationPolicy struct { + /* Password complexity. */ + // +optional + Complexity *string `json:"complexity,omitempty"` + + /* Disallow username as a part of the password. 
*/ + // +optional + DisallowUsernameSubstring *bool `json:"disallowUsernameSubstring,omitempty"` + + /* Whether the password policy is enabled or not. */ + EnablePasswordPolicy bool `json:"enablePasswordPolicy"` + + /* Minimum number of characters allowed. */ + // +optional + MinLength *int `json:"minLength,omitempty"` + + /* Minimum interval after which the password can be changed. This flag is only supported for PostgresSQL. */ + // +optional + PasswordChangeInterval *string `json:"passwordChangeInterval,omitempty"` + + /* Number of previous passwords that cannot be reused. */ + // +optional + ReuseInterval *int `json:"reuseInterval,omitempty"` +} + type InstanceReplicaConfiguration struct { /* Immutable. PEM representation of the trusted CA's x509 certificate. */ // +optional @@ -257,7 +286,7 @@ type InstanceSettings struct { // +optional BackupConfiguration *InstanceBackupConfiguration `json:"backupConfiguration,omitempty"` - /* The name of server instance collation. */ + /* Immutable. The name of server instance collation. */ // +optional Collation *string `json:"collation,omitempty"` @@ -302,6 +331,10 @@ type InstanceSettings struct { // +optional MaintenanceWindow *InstanceMaintenanceWindow `json:"maintenanceWindow,omitempty"` + /* */ + // +optional + PasswordValidationPolicy *InstancePasswordValidationPolicy `json:"passwordValidationPolicy,omitempty"` + /* Pricing plan for this instance, can only be PER_USE. */ // +optional PricingPlan *string `json:"pricingPlan,omitempty"` @@ -311,10 +344,27 @@ type InstanceSettings struct { // +optional ReplicationType *string `json:"replicationType,omitempty"` + /* */ + // +optional + SqlServerAuditConfig *InstanceSqlServerAuditConfig `json:"sqlServerAuditConfig,omitempty"` + /* The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. 
See the Custom Machine Type Documentation to learn about specifying custom machine types. */ Tier string `json:"tier"` } +type InstanceSqlServerAuditConfig struct { + /* The name of the destination bucket (e.g., gs://mybucket). */ + BucketRef v1alpha1.ResourceRef `json:"bucketRef"` + + /* How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".. */ + // +optional + RetentionInterval *string `json:"retentionInterval,omitempty"` + + /* How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". */ + // +optional + UploadInterval *string `json:"uploadInterval,omitempty"` +} + type InstanceValueFrom struct { /* Reference to a value with the given key in the given Secret in the resource's namespace. */ // +optional @@ -346,7 +396,7 @@ type SQLInstanceSpec struct { // +optional ResourceID *string `json:"resourceID,omitempty"` - /* Immutable. Initial root password. Required for MS SQL Server, ignored by MySQL and PostgreSQL. */ + /* Immutable. Initial root password. Required for MS SQL Server. 
*/ // +optional RootPassword *InstanceRootPassword `json:"rootPassword,omitempty"` diff --git a/pkg/clients/generated/apis/sql/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/sql/v1beta1/zz_generated.deepcopy.go index a22a2f20cf..818a8b9c95 100644 --- a/pkg/clients/generated/apis/sql/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/sql/v1beta1/zz_generated.deepcopy.go @@ -262,6 +262,11 @@ func (in *InstanceLocationPreference) DeepCopyInto(out *InstanceLocationPreferen *out = new(string) **out = **in } + if in.SecondaryZone != nil { + in, out := &in.SecondaryZone, &out.SecondaryZone + *out = new(string) + **out = **in + } if in.Zone != nil { in, out := &in.Zone, &out.Zone *out = new(string) @@ -337,6 +342,47 @@ func (in *InstancePassword) DeepCopy() *InstancePassword { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstancePasswordValidationPolicy) DeepCopyInto(out *InstancePasswordValidationPolicy) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.DisallowUsernameSubstring != nil { + in, out := &in.DisallowUsernameSubstring, &out.DisallowUsernameSubstring + *out = new(bool) + **out = **in + } + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + *out = new(int) + **out = **in + } + if in.PasswordChangeInterval != nil { + in, out := &in.PasswordChangeInterval, &out.PasswordChangeInterval + *out = new(string) + **out = **in + } + if in.ReuseInterval != nil { + in, out := &in.ReuseInterval, &out.ReuseInterval + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancePasswordValidationPolicy. 
+func (in *InstancePasswordValidationPolicy) DeepCopy() *InstancePasswordValidationPolicy { + if in == nil { + return nil + } + out := new(InstancePasswordValidationPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceReplicaConfiguration) DeepCopyInto(out *InstanceReplicaConfiguration) { *out = *in @@ -533,6 +579,11 @@ func (in *InstanceSettings) DeepCopyInto(out *InstanceSettings) { *out = new(InstanceMaintenanceWindow) (*in).DeepCopyInto(*out) } + if in.PasswordValidationPolicy != nil { + in, out := &in.PasswordValidationPolicy, &out.PasswordValidationPolicy + *out = new(InstancePasswordValidationPolicy) + (*in).DeepCopyInto(*out) + } if in.PricingPlan != nil { in, out := &in.PricingPlan, &out.PricingPlan *out = new(string) @@ -543,6 +594,11 @@ func (in *InstanceSettings) DeepCopyInto(out *InstanceSettings) { *out = new(string) **out = **in } + if in.SqlServerAuditConfig != nil { + in, out := &in.SqlServerAuditConfig, &out.SqlServerAuditConfig + *out = new(InstanceSqlServerAuditConfig) + (*in).DeepCopyInto(*out) + } return } @@ -556,6 +612,33 @@ func (in *InstanceSettings) DeepCopy() *InstanceSettings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSqlServerAuditConfig) DeepCopyInto(out *InstanceSqlServerAuditConfig) { + *out = *in + out.BucketRef = in.BucketRef + if in.RetentionInterval != nil { + in, out := &in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.UploadInterval != nil { + in, out := &in.UploadInterval, &out.UploadInterval + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSqlServerAuditConfig. 
+func (in *InstanceSqlServerAuditConfig) DeepCopy() *InstanceSqlServerAuditConfig { + if in == nil { + return nil + } + out := new(InstanceSqlServerAuditConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceValueFrom) DeepCopyInto(out *InstanceValueFrom) { *out = *in diff --git a/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go b/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go index 01de5bf221..bdaa42c352 100644 --- a/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go +++ b/pkg/clients/generated/apis/storage/v1beta1/storagebucket_types.go @@ -66,10 +66,18 @@ type BucketCondition struct { // +optional DaysSinceNoncurrentTime *int `json:"daysSinceNoncurrentTime,omitempty"` + /* One or more matching name prefixes to satisfy this condition. */ + // +optional + MatchesPrefix []string `json:"matchesPrefix,omitempty"` + /* Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, DURABLE_REDUCED_AVAILABILITY. */ // +optional MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + /* One or more matching name suffixes to satisfy this condition. */ + // +optional + MatchesSuffix []string `json:"matchesSuffix,omitempty"` + /* Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. 
*/ // +optional NoncurrentTimeBefore *string `json:"noncurrentTimeBefore,omitempty"` diff --git a/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go index 9b7e9f14ae..dd071a6841 100644 --- a/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go +++ b/pkg/clients/generated/apis/storage/v1beta1/zz_generated.deepcopy.go @@ -78,11 +78,21 @@ func (in *BucketCondition) DeepCopyInto(out *BucketCondition) { *out = new(int) **out = **in } + if in.MatchesPrefix != nil { + in, out := &in.MatchesPrefix, &out.MatchesPrefix + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.MatchesStorageClass != nil { in, out := &in.MatchesStorageClass, &out.MatchesStorageClass *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchesSuffix != nil { + in, out := &in.MatchesSuffix, &out.MatchesSuffix + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.NoncurrentTimeBefore != nil { in, out := &in.NoncurrentTimeBefore, &out.NoncurrentTimeBefore *out = new(string) diff --git a/pkg/crd/fielddesc/testdata/pubsubsubscription-spec.golden.yaml b/pkg/crd/fielddesc/testdata/pubsubsubscription-spec.golden.yaml index 7439d0478e..ed13915c28 100644 --- a/pkg/crd/fielddesc/testdata/pubsubsubscription-spec.golden.yaml +++ b/pkg/crd/fielddesc/testdata/pubsubsubscription-spec.golden.yaml @@ -46,6 +46,97 @@ children: requirementlevel: Optional children: [] additionalproperties: [] +- fullname: + - spec + - bigqueryConfig + shortname: bigqueryConfig + description: |- + If delivery to BigQuery is used with this subscription, this field is used to configure it. + Either pushConfig or bigQueryConfig can be set, but not both. + If both are empty, then the subscriber will pull and ack messages using API methods. 
+ type: object + requirementlevel: Optional + children: + - fullname: + - spec + - bigqueryConfig + - dropUnknownFields + shortname: dropUnknownFields + description: |- + When true and useTopicSchema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. + Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. + type: boolean + requirementlevel: Optional + children: [] + additionalproperties: [] + - fullname: + - spec + - bigqueryConfig + - tableRef + shortname: tableRef + description: The name of the table to which to write data. + type: object + requirementlevel: RequiredWhenParentPresent + children: + - fullname: + - spec + - bigqueryConfig + - tableRef + - external + shortname: external + description: 'Allowed value: string of the format `{{project}}.{{dataset_id}}.{{value}}`, + where {{value}} is the `name` field of a `BigQueryTable` resource.' + type: string + requirementlevel: Optional + children: [] + additionalproperties: [] + - fullname: + - spec + - bigqueryConfig + - tableRef + - name + shortname: name + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + requirementlevel: Optional + children: [] + additionalproperties: [] + - fullname: + - spec + - bigqueryConfig + - tableRef + - namespace + shortname: namespace + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + requirementlevel: Optional + children: [] + additionalproperties: [] + additionalproperties: [] + - fullname: + - spec + - bigqueryConfig + - useTopicSchema + shortname: useTopicSchema + description: When true, use the topic's schema as the columns to write to in BigQuery, + if it exists. 
+ type: boolean + requirementlevel: Optional + children: [] + additionalproperties: [] + - fullname: + - spec + - bigqueryConfig + - writeMetadata + shortname: writeMetadata + description: |- + When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. + The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. + type: boolean + requirementlevel: Optional + children: [] + additionalproperties: [] + additionalproperties: [] - fullname: - spec - deadLetterPolicy diff --git a/pkg/crd/template/testdata/computeinstance-spec.yaml.golden b/pkg/crd/template/testdata/computeinstance-spec.yaml.golden index b9779a0be4..f7d2b8ef90 100644 --- a/pkg/crd/template/testdata/computeinstance-spec.yaml.golden +++ b/pkg/crd/template/testdata/computeinstance-spec.yaml.golden @@ -118,6 +118,7 @@ resourcePolicies: namespace: string scheduling: automaticRestart: boolean + instanceTerminationAction: string minNodeCpus: integer nodeAffinities: - value: {} diff --git a/pkg/crd/template/testdata/pubsubsubscription-spec.yaml.golden b/pkg/crd/template/testdata/pubsubsubscription-spec.yaml.golden index 2a763abbcc..dc403987ed 100644 --- a/pkg/crd/template/testdata/pubsubsubscription-spec.yaml.golden +++ b/pkg/crd/template/testdata/pubsubsubscription-spec.yaml.golden @@ -1,4 +1,12 @@ ackDeadlineSeconds: integer +bigqueryConfig: + dropUnknownFields: boolean + tableRef: + external: string + name: string + namespace: string + useTopicSchema: boolean + writeMetadata: boolean deadLetterPolicy: deadLetterTopicRef: external: string diff --git a/pkg/crd/template/testdata/spannerdatabase-spec.yaml.golden b/pkg/crd/template/testdata/spannerdatabase-spec.yaml.golden index 84d5c7c40f..02087f9698 100644 --- a/pkg/crd/template/testdata/spannerdatabase-spec.yaml.golden +++ 
b/pkg/crd/template/testdata/spannerdatabase-spec.yaml.golden @@ -11,3 +11,4 @@ instanceRef: name: string namespace: string resourceID: string +versionRetentionPeriod: string diff --git a/pkg/dcl/schema/embed/dcl_assets_vfsdata.go b/pkg/dcl/schema/embed/dcl_assets_vfsdata.go index cefc245330..f05024d010 100644 --- a/pkg/dcl/schema/embed/dcl_assets_vfsdata.go +++ b/pkg/dcl/schema/embed/dcl_assets_vfsdata.go @@ -220,9 +220,9 @@ var Assets = func() http.FileSystem { "/compute/beta/instance_group_manager.yaml": &vfsgen۰CompressedFileInfo{ name: "instance_group_manager.yaml", modTime: time.Time{}, - uncompressedSize: 41403, + uncompressedSize: 41400, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x3d\xfd\x6f\x1b\x37\xb2\xbf\xe7\xaf\x18\x34\x38\x24\x39\x48\x8a\xd3\x5e\x8b\x3e\x17\x0f\x78\xaa\xa3\x34\xc2\x39\xb2\x21\xcb\x29\x7a\xb9\xc2\xa2\x76\x47\x12\x5f\x56\xe4\x96\xe4\xda\x56\x1f\xde\xff\xfe\xc0\xaf\xfd\xe4\xae\x3e\x1c\xf7\xd2\x77\x31\x70\xbd\xd8\x4b\x0e\xc9\x99\xe1\xcc\x70\x66\x38\x7c\x0a\x67\x3c\xdd\x0a\xba\x5a\x2b\xf8\xfa\xe4\xeb\xaf\xe1\x27\xce\x57\x09\xc2\xf9\xf9\xd9\x00\x86\x49\x02\x53\xfd\x49\xc2\x14\x25\x8a\x5b\x8c\x07\x4f\x9e\xc2\x93\xa7\x70\x4e\x23\x64\x12\x63\xc8\x58\x8c\x02\xd4\x1a\x61\x98\x92\x68\x8d\xfe\x4b\x0f\xde\xa3\x90\x94\x33\xf8\x7a\x70\x02\xcf\x75\x83\xaf\xdc\xa7\xaf\x5e\xfc\xf0\xe4\x29\x6c\x79\x06\x1b\xb2\x05\xc6\x15\x64\x12\x41\xad\xa9\x84\x25\x4d\x10\xf0\x3e\xc2\x54\x01\x65\x10\xf1\x4d\x9a\x50\xc2\x22\x84\x3b\xaa\xd6\x66\x18\x07\x44\xcf\xe3\x17\x07\x82\x2f\x14\xa1\x0c\x08\x44\x3c\xdd\x02\x5f\x96\xdb\x01\x51\x76\xc6\xfa\x67\xad\x54\x7a\xfa\xf2\xe5\xdd\xdd\xdd\x80\x98\xd9\x0e\xb8\x58\xbd\x4c\x6c\x4b\xf9\xf2\x7c\x7c\x36\x9a\x5c\x8d\xfa\x5f\x0f\x4e\x6c\x9f\x6b\x96\xa0\x94\x20\xf0\xb7\x8c\x0a\x8c\x61\xb1\x05\x92\xa6\x09\x8d\xc8\x22\x41\x48\xc8\x1d\x70\x01\x64\x25\x10\x63\x50\x5c\xcf\xf8\x4e\x50\x45\xd9\xaa\x07\x92\x2f\xd5\x1d\x11\xf8\xe4\x29\xc4\x54\x2a\x41\x17\x99\xaa\xa0\xcb\xcf\x8f\xca\x4a\x03\xce\x80\x30\xf8\x6a\x78\x05\x
e3\xab\xaf\xe0\xc7\xe1\xd5\xf8\xaa\xf7\xe4\x29\xfc\x3c\x9e\xbd\xbd\xb8\x9e\xc1\xcf\xc3\xe9\x74\x38\x99\x8d\x47\x57\x70\x31\x85\xb3\x8b\xc9\xeb\xf1\x6c\x7c\x31\xb9\x82\x8b\x37\x30\x9c\xfc\x02\x7f\x1f\x4f\x5e\xf7\x00\xa9\x5a\xa3\x00\xbc\x4f\x85\x9e\x3f\x17\x40\x35\x22\x2d\xf5\xae\x10\x2b\x13\x58\x72\x3b\x21\x99\x62\x44\x97\x34\x82\x84\xb0\x55\x46\x56\x08\x2b\x7e\x8b\x82\x51\xb6\x82\x14\xc5\x86\x4a\x4d\x4e\x09\x84\xc5\x4f\x9e\x42\x42\x37\x54\x11\x65\xfe\xd2\x58\xd4\xe0\x09\x65\x4b\x7e\xfa\x04\x40\x51\x95\xe0\x29\x9c\xf1\x4d\x9a\x29\x7c\x39\x66\x52\x69\x72\xfe\x24\x78\x96\xbe\x23\x8c\xac\x50\x3c\x01\x88\x51\x46\x82\xa6\x1a\xda\x29\xcc\xd6\xe8\xdb\x43\xa8\x3d\x08\x94\x3c\x13\x11\x3e\x01\xb8\xef\xc7\x51\xd2\x97\x4a\x64\x91\xea\x33\xb2\xc1\xd3\x60\x97\xbc\xe5\x9a\xc8\x3e\x25\x9b\x53\x58\x92\x44\xe2\x93\x94\xa8\xb5\xd4\xd3\x5c\xa1\xd2\xff\x17\x98\xc9\x32\x63\x91\xfe\x4d\xf3\xa8\x21\xf2\x0a\x35\x6b\x2e\xb9\xd8\x98\xd5\x03\x59\xf0\x4c\x01\x69\x1b\x17\x20\x25\x82\x6c\x50\xa1\x90\x76\x88\x3e\x74\x4e\x54\xff\x78\x86\x3b\x05\x25\x32\x74\x7f\xac\x4c\x6d\x08\xcb\x2c\x49\x80\x3a\x18\x9a\xe9\x5b\xa7\xa0\x59\x76\xbb\xef\xf2\x4c\xe3\x3f\xd9\x02\x63\x4c\x50\xe1\xbe\x2b\xb4\xad\xff\x04\x0b\x1a\x26\xc9\xa1\x6b\x4a\x92\xc3\x57\x95\x0a\xfe\xdf\x18\xa9\xae\x85\xc8\x68\x8d\x1b\x72\xea\x7e\x03\x50\xdb\x14\x4f\x41\x0b\x2d\xb6\xaa\xc0\x4a\x78\x64\x98\xe6\x01\xc0\x12\x2a\xf7\xde\x8b\xba\x6d\x80\x57\x37\x84\x6d\xff\xe4\x78\xd0\x9a\x8f\x33\x64\xca\x4c\xd2\x36\x75\xf3\x0d\x2d\xcc\x43\x71\xe2\xb6\x83\x51\xad\x1c\xf4\xf3\x93\xbe\x63\x1f\x7e\xe7\x0c\xf3\x5f\x04\xae\x8a\xd9\xdb\x2e\x29\x11\xc8\x54\x3f\xe2\x4c\x2b\x5b\x14\x75\x84\x15\x02\x36\x12\x48\x14\x56\xd6\x1a\x94\xbe\xe5\x4f\x02\x49\xdc\x57\x74\x83\x3c\x53\xa7\x70\x52\xf9\x66\x44\x52\xdb\x47\xcb\xf9\xcd\xaf\x16\x9b\x7c\x11\x22\x69\xbe\x4c\x4d\xac\xfc\x17\x45\xc4\x0a\xd5\x15\xfd\xbd\xf8\x53\x75\x85\xa9\xe0\x29\x0a\x45\x51\x16\x44\x23\x99\xe2\x6f\x91\x24\x94\xad\x2e\x79\x42\xa3\xca\x47\x3f\x0b\x22\x04\xd9\x96\xfe\x6a\x67\xbe\xe2\x4e\x63\x0d\x9b\x30\x
4a\x8d\x1b\xbb\x40\x0f\xb9\xb6\xcd\x21\xd5\xed\xb7\x4e\x85\x53\xa9\x39\x9f\xac\x30\x2e\xa4\xcc\x4a\x73\xc0\x40\x5b\x4a\x25\x90\x00\x11\x61\x4e\xdf\x6f\x81\xb3\x44\xff\x07\xe1\x96\x24\x19\x0e\x1a\xf3\x94\xc8\xe2\x3e\x6e\x52\xb5\xad\x90\xb4\xc4\x4c\x54\xaa\xbe\x5d\xa9\xfe\x67\xe9\x3b\x55\xb8\xa9\xe0\x23\x48\x97\x1a\x52\x6c\x8b\x10\x0b\x77\x23\x2a\x4c\x20\xfb\xa3\xf1\xa5\xd6\x67\x6b\x8c\x3e\xd6\x3f\x05\x76\x71\x70\x5a\x96\x56\x6f\x0b\x40\x8d\xa6\x0d\x4a\x5d\x4f\xcf\x73\xeb\xca\xce\x00\x22\xdd\x13\xd4\x9a\x28\x90\x74\xc5\x48\x22\xcb\xf4\x1c\xb4\x0c\x2f\x70\x89\x02\x59\xd4\x5c\x98\xdd\xac\xd6\x1e\x2a\xcc\xac\xae\x59\x02\x2c\x29\x26\xf1\x29\x48\x4c\x96\xe7\x94\xd5\x5b\x50\x46\x15\x25\xc9\x6b\x4c\xc8\xf6\x0a\xa3\x36\x74\x51\xa6\xb0\x90\x2a\x25\xe0\x46\x16\x9b\xef\xdf\xfd\x6d\x17\x36\xc7\xd5\xb1\x76\x63\x94\x65\x9b\x05\x0a\xad\x3b\x25\x46\x9c\xc5\xd2\xa2\x52\x23\x38\xcc\xfa\x81\xe5\xdf\x11\xaa\x24\x2c\x70\xc9\x05\x02\x55\xd6\xa2\x47\xd9\xdc\x57\xfa\x8f\x8a\x03\xc3\xbb\x1c\xa8\xb6\xa5\x03\x20\x05\x46\xc8\x54\xb2\xd5\xff\x30\xc2\xaf\x98\x86\x1c\xc0\x4c\xef\x4c\x87\x56\xad\xa9\xc9\x56\x2b\x6a\x7e\x27\x8b\x46\x01\x98\xe6\x3c\x61\xfa\xd0\xdf\x51\x1b\xdd\x20\x32\xa6\x57\x4a\x05\x48\x45\x84\xca\x52\xb0\xd8\xc9\x17\xa3\xd1\xe0\x41\x06\x20\x1a\x84\x40\xac\x55\xdf\x86\x32\x2c\x70\xb7\x05\x22\x10\xae\x27\x6f\x47\xc3\xf3\xd9\xdb\x5f\xdc\x8c\x53\x81\xb7\x5a\x01\x95\xb1\x1b\x80\x5a\xc5\x37\x2c\x05\xdf\x78\x34\x68\x4c\x6a\x5c\x17\xd8\x4b\x05\x6e\x88\xca\x04\x26\x5b\x37\x8a\x11\x3a\x01\xb0\x9b\x4c\x2a\x58\xa0\x83\x47\xd8\x0a\xe1\xc3\x49\x0f\xbe\xf9\xee\xe4\xe4\xd7\x62\x9b\x2c\x88\x44\x2f\x29\x26\x9a\xa5\x1a\xb2\xb7\xb1\xad\x6b\x2c\xf8\x63\x0d\x42\x97\xe4\xd5\xa3\x15\xeb\xd5\xfd\x35\x95\x32\x77\x86\x2a\x56\x49\x99\x95\xc6\x56\xf8\x56\x16\xa7\xc1\x98\x25\xe7\x0b\x7c\xd5\xff\xf6\x7b\x88\xd6\x44\x90\x48\xdb\x24\x90\x70\xb6\x1a\xe4\xe2\x4f\x1a\xd2\xe8\xa1\xfc\xe9\x13\x59\x5c\x97\x53\x04\xd6\xdb\x74\x8d\xcc\x70\x09\xd1\xd8\x8a\xf9\x06\x96\x3c\x13\xfd\x1c\xb0\x43\x84\x9e\xb0\x0a\x2e\xa5\x39\xcf\x
c0\x72\xcd\xa4\xcd\xa9\x7c\x6b\x4f\xe4\x1f\xa6\x6f\xce\x5e\x9d\x7c\xf3\xed\xaf\xcf\xf5\xd9\x5a\xba\xc3\x35\x45\xb5\x34\x47\x6b\xb1\x8c\xf4\xff\x74\x8b\x81\xba\x57\x2f\x8a\x41\x2c\x83\x70\x36\xa3\x1b\x94\x8a\x6c\xd2\xc3\x49\x77\x56\x07\x51\x6a\xaa\xed\x89\x0b\x96\x34\x55\x56\x83\xa8\x7e\x22\xa0\x3c\x98\x5d\xea\x54\xd3\xf7\x9f\x7a\xe1\xdf\x7c\xf3\xcd\x7f\xfc\xb3\x7b\xe5\xba\x89\x59\x79\x55\x09\xe2\xbd\x72\x92\xb2\xaa\x6f\x3f\x66\x0b\x14\x0c\x15\xca\x3e\xdd\x6c\x32\x45\x16\x09\xd6\x96\x10\x65\x42\x1b\x62\xc3\xa8\x62\xbf\x41\xbb\x6a\xad\x63\xad\xd2\x3f\xd4\xae\x5d\x01\xb7\xf6\xdd\x0f\xdd\xcf\x3e\x5c\x64\x2a\xcd\x14\xe8\xa6\xbf\x1a\xec\x1b\x0b\x9e\x2f\x0b\x0c\x13\x0b\xdb\xb0\xb2\xca\x25\x7e\x05\x7b\xa5\xe6\xc5\x5e\x6b\x21\x95\x11\x71\x7a\x0b\x69\x13\x3a\xce\x12\x8c\x0d\x75\x91\x44\xeb\x3a\x4c\xb5\xe6\x32\x1f\x7f\xf0\xec\x50\xba\xb4\x9b\x1f\x64\xa1\x37\x24\xa3\x6c\x55\x57\xa6\x5d\xaa\xb4\x4b\x91\xd6\x0d\xc8\x7c\x80\x5a\xbb\x56\xb2\xec\x43\x1a\xc5\x15\x49\x4a\x1a\xb7\x86\xf3\x76\x9d\xb0\x9b\x04\x8a\x6b\xc9\xe7\xf0\x82\xf1\xa0\xb4\x02\x20\xac\x5d\x87\x09\xdc\xf0\x5b\x3d\x01\x65\xf5\x42\xbb\xde\x37\xd2\x49\x9f\x05\xcd\xf9\xc0\x2a\xa2\x0a\x4d\x0f\xa0\xab\x17\x56\x8f\x46\xbf\x33\x07\xfe\x53\x52\x6f\x37\xdd\xda\xd1\xbc\x83\x6e\xde\xc4\xe1\xc2\x7c\x76\x02\x29\xd9\xc2\x02\x35\xa2\xdd\xe7\xa6\x2d\x3b\xb6\x8e\x59\x67\x27\x10\x9a\x18\xf3\xca\x36\x07\x7d\x64\xb7\x9e\xdb\x92\xd2\x91\x3d\x4d\x6a\x25\x8c\x79\xb6\x22\x94\x35\x60\x66\x4c\xd1\x44\x37\xb2\x60\x64\xc5\x06\x02\x99\x45\x11\x4a\xb9\xcc\x12\x6d\x6f\x8c\x97\xc6\xe9\xbc\x26\xb7\x08\x31\x95\x9a\xd6\x71\x2e\xfe\x03\xbc\x66\xc6\xed\x79\xc7\x34\x26\x31\xdc\xd1\x24\x31\x0e\xeb\x05\x42\xca\xd3\x2c\xd1\x0b\xfd\xc1\x8c\x87\x24\xee\x99\xc1\xe7\x9e\x59\x7e\xb6\x1c\x38\xb5\x70\xe6\x4d\x4b\xb9\x00\x59\x06\xf7\x70\x2e\xad\x0e\xfc\xc8\x3c\x5b\x1d\xec\xb1\x39\xb8\x6e\xed\xef\x92\x3d\x06\xbb\x44\x29\x7d\x7c\x2d\x98\x6d\x60\xe0\xdb\x16\xee\x63\x89\x15\x1b\x30\xb5\xb2\x28\xf9\xed\xec\x71\x39\xc2\xc1\xfe\x0c\xdd\x3a\x4d\xcb\xe0\xb1\x31\x97\xa5\xe3\x
5e\x03\xee\xd9\x33\x09\xf3\xc2\x23\x31\x77\x46\x23\x89\x22\x2e\xb4\xfd\x97\x6c\x8f\xe7\x13\x2f\x12\x1f\x89\x33\x5e\x3b\xf0\x7f\x12\x69\x66\x1d\x48\xad\xd2\xcc\x7d\x3e\x1e\xdb\x8c\x33\x7c\x24\x4c\x4f\x0a\x97\xdd\xe7\x86\x65\x91\x31\xa7\xcd\x63\x2b\x70\x19\x2f\x61\x3e\x64\x64\x1d\x84\xd4\xe2\x80\xf9\x48\xa8\x9d\xe6\x03\x7c\xa6\x08\xae\xb3\x71\xe1\x79\x68\x61\x64\xf3\xdf\x80\x96\xf3\xea\xba\xb4\xe4\xb2\x0d\xe6\x36\x80\x15\x4d\x78\x4f\xa5\x69\x20\x38\x57\x90\xa2\x90\x54\x2a\x64\xaa\xe9\xb8\xa1\xf2\xa3\xa1\xbd\x57\xcb\xc4\xb8\x51\xcc\x9f\x73\xcb\x8d\x6e\xc8\x0a\xed\x8a\xa8\x84\x18\x97\x94\x19\x04\xe8\x8f\xed\x82\x5d\x8b\x6b\xad\x28\x1f\xc2\x3c\x4b\x81\x72\xfd\x98\xcc\xe3\x07\xf8\x4c\x99\xc7\x72\x84\xc0\x88\xb3\x25\x5d\x65\x02\x63\x7b\x9a\x2f\x4e\x30\xb6\x6d\xcc\x8d\xb5\xe3\x1c\xe7\x0d\xa8\x04\x04\x1a\x57\x94\xe1\xb9\x9c\x97\xdc\xf6\x1e\xc0\x1b\x7d\xd2\xba\x27\x9b\x34\xc1\x1e\x48\x54\x86\x75\xb8\xb0\x66\xbc\x71\x46\x18\x05\xd7\x80\x9b\x72\x9e\xc8\xdc\x73\xea\x17\xf5\x10\x82\x9b\x59\x3e\x26\xc1\xfd\x00\x9f\x29\xc1\x9b\xd2\xc2\x4c\xb8\x5d\x5a\xe4\x0d\x8e\xc7\xfa\x2d\x0a\xba\xdc\x3e\x1e\xd2\xdf\x7b\xf8\x9f\x29\xce\x2d\x22\x0d\x16\xa8\x96\xaf\x3e\xef\x62\xee\x00\xe5\x2e\xbe\x0f\xbf\x0e\x2a\x4e\x9d\xe6\x51\xc1\xed\xcb\xad\x9f\xca\x3c\xa1\x52\xbd\xab\x81\x99\xc3\x06\xd5\x9a\xc7\x10\xf3\x28\xdb\x20\xb3\xd9\x19\xc7\xd1\xaf\x8c\xa6\x83\xfd\x72\xaf\x8b\xce\x6d\x9e\xa0\x21\x03\x6e\xfe\x69\x3c\xe2\xf9\x07\x6b\x30\x53\x99\x87\x34\x0e\xf6\x8e\xe5\xa9\x34\x94\x33\x13\x23\xda\x1e\xee\x21\x7b\xdd\x80\x71\x98\x97\xac\xb3\x7f\x05\x0d\xf6\xab\x8f\xc4\x19\x89\x68\x04\x9e\x42\x16\x63\x5c\x59\x8c\x46\x4d\x9d\x03\xab\xc7\x2d\x12\x09\x2e\xa5\x89\xe3\x1a\x9e\x25\x2e\x8a\x4b\x92\xb6\xb0\xe0\x1e\xbe\x2b\x77\x04\x59\x93\xb4\xc5\x88\x0d\x86\xcd\x6a\x08\x9d\x15\x40\xda\x1a\x1e\x82\xcd\x12\xb8\x11\xcb\x36\x9d\x3b\x5c\xef\xe9\x0a\x1a\xa5\xee\xa6\xa5\xe0\xdd\x9a\x46\xeb\xd2\xf9\x2d\xe2\xec\x16\xc5\x0a\xa5\xcb\x9e\x0a\x6d\x42\xad\xd8\x6e\x51\x9f\xff\x04\x70\xa6\xb9\x94\xfe\x8e\xe0\xc2\x24\xcf\x63\x74\xce\x
79\xfd\x4d\xe5\x8e\x7e\x69\x12\x86\x60\x9e\xa5\x31\x51\x68\x97\x30\xf0\x94\x98\x62\x79\x76\xb3\x6d\x8a\xf3\x17\x4d\xcf\xc9\x25\x97\x92\x2e\x12\x07\x51\x9e\xc2\x6c\x38\xfd\x69\x34\xbb\xb9\x7a\x3b\xbc\x1c\xdd\x5c\x4f\xae\x2e\x47\x67\xe3\x37\xe3\xd1\xeb\x1e\x0c\x27\xbf\xf4\xe0\xc7\xe1\xf9\x70\x72\xe6\x7e\xbd\xb9\x1a\x4f\x7e\x3a\x1f\xdd\xfc\xe3\x62\x32\xaa\x0b\x03\x64\xd9\xa6\x4e\xd7\x7e\x2b\xf4\x46\xc3\xe1\xe4\x97\xc6\xdf\xfc\xd8\xa1\xc6\xe5\xa9\x54\xbe\x1b\xa6\x0d\x33\x58\x3d\x78\x0e\x4d\xfe\xfa\x87\xee\xdd\xc5\x06\xa6\x01\xdc\xad\xd1\x05\xc9\x76\x6c\x0c\xeb\x36\x68\xf1\x06\x68\xab\xd6\x76\xab\xc6\xb6\xea\x54\xdb\x5b\x4f\xee\x11\x67\x87\x3d\x62\xed\x10\x8e\xb7\x43\x57\xcc\x1d\x8e\xdd\x82\x21\x8c\x77\xc5\xdf\xc1\xd1\x38\xf4\xf7\x1d\x01\x78\x08\xd3\x3b\xd8\x2e\x18\x81\x77\xf9\xa0\x1f\xf4\xf8\xbf\x3e\x7f\x19\xb9\x18\x79\xcc\x23\xf9\xd2\x32\x82\xec\x1b\xfe\x7b\xf9\x94\xdc\x12\x9a\x68\x32\x05\xf6\xa0\xfd\xd1\x40\x75\x63\x1b\x09\x33\xe7\x21\xaf\x94\x2d\xac\x12\x97\xed\x34\x18\xec\x8f\x8b\x29\x49\x9b\x34\x14\x72\x9c\xc2\xbe\xec\xb4\x24\x34\xe1\xb7\x28\xac\x1d\x71\xb8\xe6\x7e\x53\xe9\x7f\x98\xd6\xab\xf6\xad\x09\xe6\xa6\x50\xb6\x67\x04\x2d\x88\x53\x14\xda\xea\x33\xf9\xbe\x44\x9a\xc4\x38\x83\x61\xbd\x98\x4c\xe0\xc0\x58\x67\x45\x82\x4a\x05\x3b\x54\x82\xcc\xd2\x94\x6b\x4b\xb5\x07\xf3\xc9\xc5\xcd\x9b\xe1\xf8\xfc\xe2\xfd\x68\x3a\xb7\x4e\xbe\x18\x97\x24\x4b\xcc\x01\xb3\xf6\xb5\x2e\x53\x2b\x70\xaf\x27\x7f\x9f\x5c\xfc\x3c\xe9\x41\xa9\x4f\x59\x76\xd6\xe5\x66\xdf\xf7\xa8\xfc\xad\xd4\xb9\x20\x11\x65\x2b\x14\xa9\xa0\x4c\x1d\x41\x9f\xa2\x73\xa9\xd1\x7e\xc1\xb7\x52\xdf\xa6\x99\x65\x63\xf1\xd6\x23\xbd\x21\xfa\x10\x60\x73\xec\xa8\xb5\xd3\x36\xfa\xd8\x5f\x4d\xcf\x48\x78\xf4\x91\x9a\x48\xb5\xca\x7d\xd8\x74\xc5\xb8\x39\x4b\xae\xd1\x38\x10\x50\x78\x6f\x42\x88\x61\x06\x30\xac\x7a\xdd\xb3\xb4\xaf\x78\x5f\x2b\xc9\x32\x96\xf2\x40\x79\x2a\xf8\x2d\x8d\xdd\xa4\x84\xc9\x38\xe6\x60\x95\xaa\xd9\x6e\xa1\x31\x7a\x95\x01\xb8\xd6\xeb\x77\x54\x7a\x25\xf0\x5b\x86\xd2\xcd\x5e\x33\x9b\x3d\x03\xa3\x10\x5c\xc0\xfc\x
6f\xaf\xbe\xd6\x26\x41\x4c\x35\xf6\x26\x5c\xbd\x43\xa5\x59\x8a\x83\xc4\x2a\x0b\x6a\x50\x09\x51\x1a\x52\x69\xd6\x3d\xd8\x90\x8f\x08\x04\xe6\x2b\x54\xcf\x5f\xcc\xf3\xd1\x14\x77\x91\x85\x5b\x6c\x45\xcc\xa1\x46\x2f\x8d\x9b\xac\xd4\x3c\x5e\xb5\x1d\xad\xea\x39\x39\xf1\xc1\xac\x55\x3b\x47\x0d\x21\x63\xf4\xb7\x0c\x81\xc6\xc8\x94\x3e\xfd\x88\x22\xa4\xee\x59\xce\x4c\xb3\x99\x73\x60\x2e\x19\x08\x58\x21\x43\xe1\xe2\x39\x54\x96\x00\x1d\x1e\x97\xa5\x65\x0c\x1f\xbe\xe5\x2a\x04\x7a\x28\x66\x6a\x2a\xc9\xc3\x06\x03\xbc\xd8\x8d\x47\xc4\x9e\xbb\xf2\xc4\x42\x19\x62\x6d\xcb\x6a\xcf\x0e\xf3\x78\x9c\x39\xff\xdb\xf1\xa8\xf4\x10\xda\x90\x56\xc3\x52\xc3\xf3\x97\xbb\x0c\xdd\x8d\x05\x17\xd3\xd7\x9c\x52\x59\x49\x5b\x32\x64\x11\xfc\xc9\xa4\xe7\xb0\x02\x76\x11\xc0\x49\x12\x60\x78\x57\xd5\x37\xbb\x9c\x02\xe5\x31\x3c\x4c\xeb\xca\xca\x7d\xa7\xe1\xd3\x9b\x83\xe7\x72\xb4\xac\xdf\x2d\x5a\x9b\xbc\xa7\xcc\xde\x42\xd9\xf2\xcc\xa4\x80\xcd\xbd\x9f\xad\x38\xf9\xf7\xec\x07\x93\x28\x7b\x6d\x24\xa2\x9c\xf1\xd2\xe7\xaa\x14\x14\xe6\x48\x52\x84\x9a\x64\xed\x6c\xa2\xa9\x39\xd7\x88\x98\x5f\x4e\x2f\x86\x67\xb3\xf1\xfb\xd1\xbc\x99\x16\x1a\x71\xb6\x4c\x68\xa4\x6a\xac\x76\x6b\x6f\xfd\x34\x13\x4b\xec\xce\xee\x3b\x85\xfc\xc9\x18\x38\xc0\x4c\xed\x3c\xec\xd3\x9e\x0f\xe7\xdd\xf3\x6a\x42\x77\x90\x67\x3d\xf4\x87\x7b\x31\xd8\x51\xa9\x6c\xbb\xd2\xd7\x4c\x06\x97\xdb\x54\x5d\x7c\x5b\x64\x7a\x99\xf4\xb4\xef\xbe\xa9\xe0\xb6\x96\xaa\xd6\xb3\x6e\xfe\x4f\x91\x12\x76\x00\x72\xe2\x4b\x2e\xd4\xe1\x99\xd6\x93\xbc\x6b\x1b\x9e\x4c\x0b\xd0\x06\xa5\x84\x92\x77\xdc\xfb\xa2\xab\x12\x5b\xda\x75\xa3\x71\xb6\x89\xea\x21\xd5\xe4\xd9\x51\x59\x97\xf1\xc7\xaa\xf8\xcf\x2f\x19\x3b\x88\xcb\xae\x33\x60\x9d\xa9\xcb\x53\xd8\x2f\xf9\xba\xc6\xe0\x01\xf2\xe5\xec\x9b\xdb\x1b\x2c\x27\x68\x0b\x73\x97\x18\x3a\x70\xf0\xfa\xe4\x2c\x7e\x10\xcd\x0d\x42\xb9\x50\x8f\x9a\x84\xad\x69\xb8\x1b\xab\x7a\x1a\xce\x3b\xde\x73\xbe\xb3\x88\x30\x93\xc6\xe5\xfc\x5c\x0b\x54\x77\x88\x0c\x5e\x19\x74\x7d\xf7\xed\xb7\xdf\x7c\x7b\xe4\xc2\xdd\xb5\x8b\xc3\x25\xe0\x65\xe5\xbe\x46\x78\x1d\xb6\x
49\xbe\xa3\x4b\x97\x09\x0f\x24\xce\xde\x1a\x2b\xe1\x59\xec\x7f\xb5\x72\x57\xbc\x6c\xce\x34\xd7\x5a\xac\xcc\xe5\xd6\xa9\x70\x38\x26\xa6\xe5\x0b\x3c\xf0\x89\xec\xd5\x0f\x76\x36\x7b\x3b\x51\x2a\xab\x6b\xf7\x8b\x38\xa3\x47\xa0\xa4\x31\x4a\x78\xbe\x34\x21\x42\xe7\xa1\xf3\x88\x93\x2f\x0e\x37\x8b\xbd\xfe\x3f\x1c\x7d\x57\xcd\xbb\x11\x0f\x42\xe0\xce\x0b\x3a\xb3\x5a\xc8\xdb\x9d\x83\x6c\x50\xdc\xd9\xa8\xd7\xd3\xf3\x63\x70\x20\x6e\x69\x84\xc3\x28\xe2\xd9\x31\x2e\x87\xab\x4a\xff\x4e\x97\x8e\x1b\xca\x24\x29\x65\x4c\xb9\x50\xa3\xf1\x23\x10\xa9\xad\x6a\x73\x8a\x23\x2e\xb4\xab\xcd\x6b\xad\x2a\x48\x3d\x97\x18\xbc\x47\xc8\xa6\xbc\x77\x70\x0c\x67\xd5\x6b\x16\x8d\x29\x54\xc1\x32\xc4\x58\x9a\x71\xcb\xd7\x99\xf3\xcb\xdd\xe5\xd4\xad\xd8\xdf\xa8\x2c\xc1\xff\x71\xeb\xbd\x49\xbd\x86\x03\xa0\x36\xee\x29\xfc\x8f\x93\x33\x13\x23\x31\xff\xf7\xbf\x22\x2d\x04\x5c\x2b\x39\x58\xb9\x7f\xb9\xe6\x83\x88\x6f\xea\x6e\x2d\x8d\xb6\x1a\xb5\xf7\x13\x37\x63\xb2\x79\xd9\x4a\xb4\x5c\xc8\xe0\x86\xd0\xa4\x60\x12\x45\x14\x2e\xb3\xe4\xd8\x88\xd9\x55\xa5\xff\x61\x7e\xc3\xd6\xbe\x15\x06\xf3\xad\x72\xa3\xcc\x5a\xda\xf9\xbe\xf2\x90\xe3\xaa\x9d\x55\x02\xd7\x66\x98\xa4\xc2\x55\x36\x30\x43\x84\x63\x10\x41\xd7\x79\x43\xf7\x94\xe1\xb4\xb5\xdd\x17\x0f\x9d\xd0\xba\xfc\xec\x31\x95\x1f\x83\xee\xf7\xce\x20\x00\x00\x89\xad\xab\x8b\x24\x97\x9d\x5e\xfc\x9d\x80\x1e\xbc\xd8\xd7\x7a\x05\x41\xc0\xdd\xf1\x05\x70\xd7\x31\x5f\x97\x6e\x83\xb7\xcf\xbf\x23\xda\x00\xe1\x2b\x9a\x16\xee\xee\x1e\x0f\x58\x75\x31\x4c\x20\xb0\x59\xfc\x34\x44\xaf\xc4\x7c\x0b\x5b\x06\x70\x99\xca\xa8\x75\x48\x29\xc7\x31\xce\x3a\x17\x0d\xe5\x3b\x70\x3d\xef\x5b\xe5\x02\xde\xbf\x2b\x44\x6f\x9e\xe5\x53\x08\xef\xb6\x60\x09\x18\xa7\x1e\x95\xb0\x4c\xc8\xca\x0b\x35\x23\x67\xfd\xc9\x0a\xa8\xb5\x2c\x4c\x62\x98\x5c\xf3\x2c\x89\x4b\xf3\xed\x9a\xe8\x52\xa1\x00\x6a\xdc\x40\x8c\x1b\x33\x1d\x85\x85\xef\x74\x86\x51\x11\x3d\xc0\xc1\x6a\x60\x1d\xd2\xe6\x8f\xf4\x16\x9b\xa9\xde\xc5\x4f\x91\xe5\x6b\xad\xc3\xbb\x35\x4f\xb0\x88\xd0\xf8\x64\x50\x98\x70\x85\xa7\x0e\xd5\x44\x29\x12\xad\x3b\x67\x
4b\x19\x4c\x47\xc3\xd7\x37\x17\x93\xf3\x5f\x60\xc3\x63\xd4\xb6\xb3\xcb\x23\xd7\x28\xef\xe7\x80\x3b\xa3\x12\xd5\x9f\xc9\xe8\xfd\x68\xda\x83\x8b\xc9\xcd\xe5\x68\xfa\x6e\x38\x19\x4d\x66\x37\xe3\xc9\xd5\x6c\x38\x39\x1b\xdd\xbc\x1e\x9d\x8f\x66\xe3\x8b\x49\x3d\xe0\x5b\xfc\x84\x42\xbf\xc5\x4f\xdf\x82\xef\xf8\xde\x3d\x6e\xa0\x63\x33\xcb\x22\xb8\xcd\x2b\xcc\x6d\x1a\x15\x17\x0e\x58\xc5\x21\xe8\x92\xd4\xf2\x0c\x7a\xbf\x9f\x82\x93\xe6\x8d\x94\x46\xcf\xe0\x3d\x40\x15\xb9\x08\xc8\x86\x18\x3a\x7f\xc4\xad\x4f\x89\x33\xdc\x89\x5a\x9b\x06\xc1\xea\xb5\x48\x6f\x1f\x1b\x7e\x68\x6e\x06\xbc\x57\x28\x18\x49\xc6\xe9\x9f\x5b\x30\x8f\x8a\x75\xfc\xdb\x89\xe7\xd2\xda\x3f\x85\x90\x1e\x5f\xd6\x45\xb4\xc0\x04\x89\xfc\x9c\x65\xf4\xf8\x12\x2c\x4a\x88\x4b\xa4\xd1\xd2\xba\x03\x6a\x49\xef\x1c\x24\xad\x3b\x40\x56\xe5\xf8\xde\xd2\xda\x0b\xd5\x0e\xc8\x3e\xb1\xe6\x4f\x2e\x54\xbb\xb7\x68\x85\x25\x7d\x53\x60\xa8\xee\xb8\xf8\x68\x78\x92\x48\x49\x57\xcc\x32\x40\x53\xd4\x06\x27\xde\x10\xbf\x07\x89\xda\x20\xc8\x9a\xf8\xf5\x13\xa4\x4c\xa1\x58\x92\xe0\x7d\x65\x30\x5a\xf6\xff\x87\x9c\x1d\xb3\x7f\x5f\x39\x5b\x5a\xfb\x17\x39\xfb\x45\xce\xb6\x81\xf9\x57\xcb\xd9\xee\x2d\x5a\x61\x49\xdf\xf4\xcf\x2f\x67\xf5\xae\xca\x8e\xa8\x38\x70\x65\xfa\x1d\xee\x15\xca\x3e\x49\x85\x01\x3b\xeb\x3c\x26\xdb\xe2\x80\x7d\xb6\x87\xa7\x48\x0b\x04\x19\x91\xa4\x28\xb7\x55\xc5\xc3\x3e\xa9\xd0\xc3\x1c\x46\xad\xdd\x83\x6e\x2d\x94\x7d\xf4\xc5\x08\x75\x3f\x7d\x31\xff\x97\x2f\x1a\xfc\x60\x6f\xd6\x9a\xcc\x6a\x9f\x74\x53\x75\xb6\xba\xc8\xc5\xf1\xf7\x41\xa8\xbc\xb2\x9f\x82\xc8\x5b\x70\x9e\x20\xa9\x9f\xcd\xeb\xdb\xce\x81\xf8\x54\xb8\x1b\xc2\x42\x0b\x4b\x16\xd3\xc8\x5e\x7c\xbb\x5b\xa3\xa9\x9c\x79\xc0\x7d\x0f\xea\x92\xed\xa5\x99\x99\x55\x3d\x03\x18\x56\x7e\x87\x0d\x12\x66\xf7\xf6\xa9\xb9\x10\xea\xa8\x15\x30\x61\xf6\xca\x2f\xd1\x83\x16\x37\x76\x4c\xf9\xcd\x15\xb7\xa9\x76\xf5\x54\x69\x8b\x5f\x3d\x9e\xcb\x26\x79\xbe\x2c\xdf\xca\xf2\xd7\xdf\x7b\xfe\xc6\x4f\x4f\x0b\x79\x7b\x43\x98\xb3\x17\x3f\x68\x35\xb2\xcc\x54\x16\xb8\xfd\x65\xe1\xc9\x40\x75\x8d\x3d\x57\xf1\x
43\x03\xa4\x2f\xfa\xd1\xb6\x6a\x25\x31\x59\x5a\xe5\xa6\xdc\xfd\x9a\x0d\x8f\xed\xfd\x9a\xa3\xf9\xd2\x1b\x0b\xc7\xbb\x7e\xbd\x41\x73\x9c\xd3\x37\x93\x2d\xfd\x8f\x66\xeb\xdc\x59\x5e\x96\x7d\x5e\x99\x57\x13\x12\x1a\x24\x78\xd7\xb2\xcd\xbb\x8c\xce\x35\xc9\x97\x70\x66\xac\x99\x76\xf3\x3b\xbc\xcd\x03\x38\x7d\x5b\x87\x19\xe8\xd2\x89\xa0\x5d\x48\xda\x67\xef\x07\xb5\x66\x8d\x29\xd7\x44\x16\xf6\x66\x25\x3a\xd1\xf3\x89\x6a\x3d\x6d\xda\xf9\xd2\x13\x41\x98\xa5\x54\x13\xc2\xb6\x36\x55\x23\x17\x2b\x16\xb4\x2b\xf5\x67\xb6\x17\xa4\x28\xfa\xf9\x3c\x6c\xe7\x16\xcb\xb3\x48\x76\xdb\x98\x1a\xcf\x02\x4d\x24\xdf\xce\x4c\x99\xd9\x33\x5e\x5f\x80\xb9\xd2\x62\xac\xc0\xf0\x01\xc8\x84\x72\xa9\x5e\xb7\x36\x4a\x24\xdf\x94\xcd\x12\x2b\xf1\xb8\x9e\x7c\x7d\x1f\xf7\xb4\x78\x08\x82\xcc\x05\x12\x2d\x17\xe9\xf0\x66\x6d\x92\xc0\xe5\xf8\x4c\xc2\x22\x53\x66\xeb\x6f\xd1\x97\x6d\x8b\x6d\xd9\x9e\x20\x4c\x2f\xc5\x9a\xf7\xcc\xe1\xb0\x14\x0c\xa7\xbb\x82\x52\x02\x0e\x67\xed\x71\xdb\x86\x87\x2f\x3c\xfd\x85\xa7\xf7\xe5\xe9\x72\xca\xbe\x39\x84\xa5\x02\x23\x57\x7a\x10\x96\xe4\x96\x9b\x5b\xac\x6b\x22\x6f\x3c\x22\x6e\x2c\x22\x1e\xbe\x1b\x52\x14\x5e\x8b\x58\xd9\x7c\x8c\xbf\xa5\x1e\x3e\x6d\xc0\xec\xea\xb3\xaf\x46\xdd\x0b\xea\x83\xb6\xdc\x55\xae\x62\x43\xfc\xeb\x42\x16\x9d\x7b\x2e\x48\x8f\x5d\x3e\x1e\x92\x24\xa3\xe5\x12\xcd\x7d\xc4\xb6\x83\xf0\x2e\xb1\x04\x81\xe3\x49\x09\x6c\x4b\x8f\x5a\x51\xeb\x9a\xc0\xa1\x4b\x9b\xd8\xb1\xac\xe4\x17\x97\x51\xd3\x7a\x2a\xf7\x28\x7b\x9e\x50\xa9\xf2\x42\x10\xc0\x2d\xb6\x4d\xb9\x6c\xfd\x65\x2f\x92\xda\x9f\xe1\xe5\x18\x22\x92\x24\x2f\xec\xd6\x73\xd6\xd0\x7c\xf4\xe6\xcd\xc8\x66\x35\x3b\x87\x86\x40\x5b\x65\x91\x57\x26\xda\x0f\xca\x20\x97\xdf\x6c\xef\xa0\x1e\x6f\x32\xbe\x2f\x83\x39\xd6\x6e\xec\x02\xf2\x80\x33\x51\x61\x35\x46\x9c\xd9\x0a\x1f\x91\xa9\x65\x94\x67\x94\x3f\x7b\x16\xca\xf3\xb6\x3f\xde\x9d\x40\x85\x3b\x4e\xfa\xa6\xa5\x74\xfd\xc5\x16\xe6\xee\xaf\x73\x27\xc2\x78\x61\x99\x36\x40\x56\x53\x67\x8f\x36\xf4\xbb\x77\x15\x95\x53\x34\x71\xdc\x4f\xa2\xe1\x1d\xac\x3f\x4c\xc1\x7b\x1c\xb7\x54\xd4\x00\x
6b\xa5\xc3\x02\x91\xe9\x19\xe8\xb9\xed\x28\x9d\xd8\x03\x3a\xc0\x81\xd9\xcd\x5d\x65\x6a\xc1\x6c\x1d\xca\x82\x24\x1f\x54\x58\xa6\xc6\x0e\xe6\xdc\xe8\x59\x22\x08\xb9\x9b\x4d\x76\xb0\x05\xec\xcb\x1a\x76\x5a\x97\x9c\x27\x87\x27\x94\xcf\x8a\xbe\x6d\x2e\x29\xe7\x9f\x29\x92\xde\x8a\x3e\x45\x72\x63\x71\x21\xbd\x7c\x76\xae\xac\xc8\xd4\x7d\xa8\x5c\x66\xf2\x38\xd1\x88\x24\x71\x8c\xb1\xbb\x76\x62\xd1\x6c\x8b\xa8\x90\x4c\xf1\x0d\x51\x54\x8b\x41\xfb\x78\x49\x23\x4d\xbd\x24\xaf\xf7\xbd\xde\xd2\x40\xc8\x63\xa4\xa5\x07\xdc\x69\x35\xc9\xd8\xda\xa2\xad\x5e\x76\xe8\x22\x49\x41\x8e\x1a\x03\xb5\x5d\x22\x29\xaa\xb2\x7d\xc2\x5b\x77\xb3\x7a\xf1\xf9\x20\x1f\x39\xd2\x16\x45\x4a\x7c\x99\xad\x82\x70\xad\x99\xa6\x95\xd5\x15\x65\xe1\x4d\x16\xb7\xc0\x38\x8b\xdc\x13\x3c\x0e\xf8\x62\x0b\x99\x2c\x2a\x52\x34\x55\x90\xb3\x64\xc7\x41\xd9\xa0\x59\xdd\x96\xf4\x2c\x0a\x19\xdb\xea\x24\x72\x00\x53\x94\xf4\x77\x0f\xda\x55\xe1\x4b\x24\xcf\x1d\x4a\xa5\x69\x14\x9c\x56\xbe\xa1\x74\xb8\x0b\xfa\xba\xd4\xfb\x30\x47\x74\x4b\xcf\x06\x69\x5c\x80\x68\xcf\x82\xfc\x7b\x78\x9b\xdb\x2b\x44\x1c\xef\x7d\x1e\xb7\xc2\x3c\xdc\x04\x29\x23\xa6\x1d\xee\x5e\x05\x3a\x3e\x94\x02\x6a\x95\x62\x1d\x16\x9b\x75\x3f\x76\x6e\xa0\x19\x5c\xfa\xfc\x73\x92\xf4\x37\x74\x25\x9f\xe6\x65\x3a\x6e\x7c\xbb\x9b\x2a\xd4\xa6\xef\xbb\x92\x70\x1e\xa6\x98\x1c\xc0\x7b\x92\xd0\xd8\x05\xb0\xb4\xcc\x3d\x85\x7e\xf9\x86\x5c\x03\xea\x73\x97\x2d\xfc\xe2\xb4\xa5\xdc\xe4\x86\x50\x66\x1f\xae\x62\xf6\x70\x5a\xab\xf7\xd2\x80\x58\x0a\x3d\xca\x46\xb9\x97\xa2\x04\xc1\x40\x4f\x6c\x72\x31\x19\xcd\x4f\x4d\x09\x2e\xc6\x59\x3f\xf7\xfd\x37\x35\xad\x5d\x5f\xaf\xa8\x6f\x52\xa7\x82\x7d\xa1\xca\x94\x4b\x6d\x68\xd9\x70\x01\x91\x49\xbd\xbe\x87\xfe\x63\x8e\xaa\xca\x97\x0d\xb9\xbf\xca\xc4\xea\x01\x89\xb7\xef\x1c\x84\x87\xb1\x70\x0b\x94\xc6\x3e\xdf\x90\x7b\xba\xc9\x36\xed\xd5\x49\xdd\xad\x18\x9f\xb3\x46\x16\x3c\x70\x94\x2a\xbd\xb7\x85\x71\xb5\xd0\xa7\x8d\x4b\x9b\x16\x5e\xa6\x08\x1e\xa1\x94\xe5\x82\xf9\x7a\x94\x06\xd0\x05\xfa\x87\xbf\x08\x2c\xe9\x3d\xc6\xf9\x24\x45\xcf\x07\x97\x0b\xff\xce\x
ab\x13\x2d\xa5\x37\xe6\x11\x04\xbf\x82\x5e\xd3\x23\xaf\x8f\x44\x11\x32\x45\x56\x98\x57\xd0\x95\xa8\x2a\x1f\x7a\xa5\x4a\xdd\xb5\xd2\x59\x4d\x93\x51\xf0\xcc\x14\x37\xa2\x4b\x60\xa8\xd7\x45\xc4\xb6\x5a\xab\xc1\xae\x50\x6f\xca\xb9\xe7\x8e\xb9\x66\x42\xb7\xaa\x06\x44\xdb\x1e\x7f\xcb\x48\xe2\xe3\xa9\xc5\x5c\xf2\xfd\x51\xd4\xfb\xd9\xf3\xa5\x08\x1b\xd3\x47\x39\x80\xa1\x82\x04\x89\x54\xe0\x22\x37\x0e\xcb\xa5\xd9\xb9\xb9\x5e\xb3\xfc\xf2\xcb\xdc\x5c\x32\x0b\xd1\x68\x65\x38\x43\xeb\x07\xc2\xe0\x64\x00\xe7\x48\x04\xb3\x84\xb0\xcf\x17\x7d\x28\x00\xef\x14\x7d\x3c\x49\x28\x5b\xf5\x79\xa6\xfa\x96\x5b\x64\x5f\xf1\xbe\x5b\x61\xbf\xd6\xfc\xe9\x86\xdc\xdf\x48\x0d\xb8\x71\x25\x6d\xaf\x12\x34\x5d\xc7\xa7\x88\x24\x91\x2d\x60\xdc\x7e\x7e\x6a\xbb\xae\xb6\xeb\xc2\x5a\xb3\xf8\x70\x3e\xd8\x27\x3f\x60\x2d\x24\x4f\x32\xe5\x6b\x37\xf1\x65\x55\xe8\x46\x5d\x03\xdb\xe7\x28\xf2\x34\xd5\xfc\x3d\xbd\x0d\x8f\x51\xcb\x64\x57\x2e\xd8\x42\xa6\x12\xe6\x86\x9d\xe7\x66\xf7\xb4\xbb\x89\xe6\xc5\x98\xf3\xa2\x6f\x85\xdb\x1d\x20\xf7\x80\x4f\x6d\xa4\xb0\xef\x49\xef\xa6\xb9\xdb\xc1\xa5\x19\xb4\x8c\x96\xb7\x7c\xf9\xea\xe4\x04\xfe\x1a\x9e\x67\x49\x88\xd5\x4a\x3f\xb6\x00\x36\x2e\x9d\xef\x4f\xfe\x12\x52\x75\x60\x64\x4f\x47\x6d\x77\x78\xf5\xed\x49\x89\x30\x77\x3e\x75\xfc\xf9\xf7\x27\x1d\x93\x7c\xf5\xed\xc9\x0b\xf8\x4f\x78\xf5\xf5\x49\x85\xae\xbe\x94\xb3\xf5\x00\x13\x10\xa8\x75\x73\x8c\xa2\x22\xd9\x5a\xb2\x28\x9c\x40\x0b\x1c\x40\x0d\x59\xfe\x80\xed\xf0\x26\x28\x16\xeb\x37\x68\x9c\xba\x91\x0d\xed\xb0\xac\xe1\xc2\xa6\x8d\x84\xa4\x17\x58\x09\x46\x20\xe5\x92\x1a\x53\xc1\xad\x21\xe4\xa7\xdf\x4b\xa4\x80\x57\x23\x7f\x00\x9a\x2e\xed\x48\x87\x20\xaa\xd0\x71\x55\xbd\xe6\xef\xba\x9e\xe8\x2d\xf8\xea\xe4\xe4\x2f\x4d\xcd\x09\xc6\x86\x8f\x92\x4c\xd2\x5b\xac\xd7\x42\x75\x4f\x6f\xcd\xbf\x3f\x99\x1b\x45\xf7\xfd\xc9\x5f\x8e\x44\x61\x55\xeb\x3c\xc8\x8a\x2a\xc1\x79\xb0\x2d\xd5\x0e\xab\x69\xfe\xef\x6b\x52\x65\xad\x30\xa1\xcb\x6a\x1a\x96\xd2\xa4\xa8\xb4\x3e\xcd\x18\x4d\x58\xca\x83\x03\xda\x94\x40\x25\xb7\xc8\x92\x27\x09\xbf\x33\x6f\x1b\xf8\x8a\x
40\x2e\xed\x81\x28\x2a\xb5\x09\xa7\xcf\x02\xb3\xd2\x41\xf9\xd9\x33\x09\x1f\xac\x1f\xb5\x4d\x85\xcb\x97\xe6\x2d\x30\xad\xbd\x73\x35\x6d\x7b\x34\x0f\x28\x5a\x04\x4f\xaf\x27\x93\xf1\xe4\xa7\x79\x21\xdf\xbd\xb4\xfa\x50\x7e\x5b\x6c\x97\xc5\x50\xca\x6a\xcc\xc7\x95\x7d\xca\xcc\xe1\xa9\x39\x72\xed\xca\x85\x77\x0d\xaa\xda\x5a\x2b\xaf\x9b\x39\xff\xb1\xbb\x64\xdf\x00\x39\x77\xcf\x4c\xcd\x81\x14\x76\x55\x51\x4e\xdf\xae\x8a\xf1\x2a\x4c\x5e\x2a\x8f\xd2\xdc\x6b\xb9\xfa\xaa\x56\xe9\xb7\x17\x3e\x15\xb7\xee\x7f\x52\x72\x6c\xe7\xd8\xf4\x4f\x5a\xe4\x4c\xd1\xe4\x03\xcf\x24\x75\xfb\x7b\x7f\x7b\xbb\x01\xb3\xd5\xfe\x3e\xc0\xde\x6e\x00\x0d\xdb\xdf\x87\xd8\xdb\x4d\x52\xd5\x6d\xda\xc2\x0a\xdf\xcf\xea\x6e\x46\x07\x76\x58\xe1\x3b\xad\xee\xe0\x1c\x77\x58\xe1\xdd\x56\x77\x93\xe2\x85\x15\x5e\x06\xf5\x08\xb6\x78\x49\xa2\xbd\x38\x28\xe3\xe6\x8b\xb5\xfd\xc5\xda\xfe\x62\x6d\x57\x7f\xbe\x58\xdb\x5f\xac\xed\x47\xb7\xb6\x29\x9b\x22\x89\x43\x0f\xa3\x7e\xa2\xaa\xf6\xef\x8a\x11\xba\x0c\xe7\x77\x94\xd5\x6c\xe6\xfc\x41\x54\x6e\x5e\x38\xb5\xe1\x46\x73\x1f\xc4\xbc\xbd\x91\x6c\xbd\x4b\xb2\x69\x5d\xfa\xfd\xbe\xc0\x88\x6f\x34\x25\x82\x26\xcf\x7e\x2f\x72\x3a\x2c\xd1\x0d\x49\x9a\x55\x68\xe1\xa0\x68\xc5\xbb\x32\x98\x07\x9e\x48\xca\xa0\x76\xc5\x24\x5c\xe3\x52\x89\xda\x05\x82\x22\x1f\x91\x99\x0c\xb0\xe2\x38\x51\x04\xd0\x1c\x8b\x35\x50\xeb\x3d\x85\xd3\xd1\xd5\x6c\x38\x9d\xcd\x6d\xf9\x4f\xfb\x66\x46\xb3\x20\xa0\x31\x60\xa6\xa3\xcb\xf3\xe1\xd9\x68\x5e\x3c\xeb\xde\x34\x50\xf2\x87\x55\x6a\x8f\xd3\xe6\x4f\xab\xb8\x40\x61\xfe\x56\x4a\x6e\x49\xb6\xcc\x93\x14\x53\xb4\x42\xd8\x22\x4f\x34\x1e\xd0\xf2\xc5\x7a\xed\x2b\x07\x16\x41\xda\xd6\x6e\xee\xa7\xb7\xfc\x0e\x6f\xb1\xb0\x84\x3d\xc4\xc0\xfb\xb3\x9e\x61\x3c\xc0\xae\x99\xba\xe4\x6f\x64\x3c\x5b\xad\xab\x33\xc2\xfc\xf6\x0b\x55\x2e\x33\xd0\x7f\x24\xd6\xd0\x8e\xa9\x14\x59\x1a\x4c\x6e\x72\x6f\x96\xec\x15\xde\x70\x24\x0a\xfc\xdd\xa0\x30\xf0\xf7\x37\xd3\xd1\xd5\xdb\xdd\x61\x92\x0d\x97\xea\x75\x3e\xc7\xa1\x3e\x75\x62\xfc\xe0\x6d\xd4\x0e\xf4\x81\x9b\xaa\x1d\xf0\xce\x2d\xc6\xa5\x2a\x51\x23\x
df\x6a\xae\xb0\x27\xb1\xb0\x9a\x5b\xaf\x55\x76\x35\xb6\x62\xbe\xf5\x4c\x20\x4c\x43\x5a\x72\xb1\xa0\x36\x17\xd5\xbd\x3f\xd5\xd3\x5c\x6f\x48\xd3\x8c\xde\xd9\xc4\x08\x7e\x97\x3f\x48\x5a\x7e\x10\x47\x1f\x2c\xcb\xb7\x00\xdd\xcd\x89\xca\x36\x37\xbd\x5b\xd8\xac\xea\xd7\xf0\x59\x9f\xfe\xc9\xca\x12\xe0\x34\x21\x91\x7b\x34\x27\x17\x0c\x1d\x53\x4d\x12\x6d\x2b\xd8\x02\x12\xfe\x8d\x2d\x6f\x9e\x76\xec\xbf\x06\x44\xbf\x1f\x9d\x2b\xc5\x51\x47\xaf\x1a\x4d\xb2\x69\x6d\x33\xd9\x43\xd5\xa6\x4a\xd3\x90\x43\xc5\x10\xb5\xb9\xc9\xf5\x7e\xcd\x9f\x56\x6c\xee\x67\xd0\xa2\x26\x49\xfe\xc5\x7b\xd3\x12\xc3\xd4\x7a\x7c\x67\x32\x19\x8e\xdf\x91\xd3\x3a\xa8\x87\xed\xc3\x06\xb8\x9d\x41\xf7\x9f\x4b\xd2\xbb\xa8\xba\xe2\x6f\xa4\xba\x95\x96\x8d\xd4\x2b\xcc\x85\xf4\x8d\x93\x94\x61\x44\x07\xde\xaa\xb8\xba\xfe\xf1\x6a\x36\x9e\x5d\xcf\x46\x3d\x98\x8e\xce\xa6\xa3\xe1\x6c\xcf\x77\x28\x8a\x9e\x01\xb2\x59\x40\x4f\x1a\x14\x38\x9a\x2a\x0f\x4f\x83\xd8\x27\xe1\xc1\xa4\xf1\xb8\x1b\x59\x75\x4f\x65\x9b\x08\xeb\x48\x32\x90\xbc\xd0\xa2\xe1\x0b\x7b\x95\x77\x4b\xf0\x1e\xa3\xcc\xbc\xc7\xe6\xe4\x50\xc0\x45\x93\xd7\x56\x5f\x88\xaa\x75\x62\xcf\xbf\x8d\x24\x3f\x6b\xb6\x5c\x5c\x5e\x5e\x4c\x67\xd7\x93\xf1\xd5\x6c\x7c\xd6\x3e\x4f\xc6\x3d\xdf\x99\x17\xe6\x1b\x33\x8b\x4d\x3e\x7c\x69\xef\xe7\xd7\x5e\x7d\x4d\xb8\xa6\x5c\x91\x90\x9a\x37\xc8\x96\xb6\xc0\x7b\xbe\xb6\xea\x4d\x37\xfb\x58\x8b\x9d\x6c\xb3\x82\x72\xc8\xc3\x92\xc8\x46\x98\x34\xcc\xa7\x95\xb5\x37\xbe\x36\xf3\x1e\x3c\xe2\x0e\x4e\x37\x7c\xdf\x4c\xba\x6d\x39\x4c\x05\x0b\x67\xcb\xd2\x55\xf0\xbd\x32\xc4\x4a\x35\xf1\x4a\xc2\x60\x44\xa2\x75\x9e\xc5\x59\x7a\xb9\x6f\xb1\xd5\xc6\xf1\xbc\x5e\x28\xbc\x8a\x5b\xfb\x44\xfd\x5c\xaf\x67\x3e\x80\xd1\x2d\x8a\x6d\x0e\x4b\xf3\x3e\x49\x53\x24\x42\x8b\x7c\xa3\x4f\xb8\x9e\x7f\x8a\xa2\x23\x81\x0a\xca\x37\x21\xf8\x2d\x0a\x61\xca\x48\x1a\x33\x98\xa7\xfd\x04\x6f\x31\x09\xcc\xca\x76\x18\x80\x3e\x6d\x35\xfd\x80\xd6\x07\xe8\x8a\x5e\x26\xf6\x6a\xff\x9a\xa6\xc5\x59\xd4\x3e\x54\x6c\x60\xb4\xba\xf8\x8f\xf4\x0c\x96\x07\xbc\x71\xe3\xdd\x78\x9e\xb9\x21\x2c\xce\x33\x
9b\x3c\x61\x6f\x0a\x5f\xe1\xcd\x92\x8b\x1b\x72\xe3\x60\x17\x39\x50\x06\x76\x8d\x9f\x47\xf7\x24\x52\xee\x6d\x8d\x22\xd1\xd6\x9c\xf3\x12\x24\xb7\xee\xb9\xb2\x72\x72\x8a\x45\x72\xc6\x24\x9a\x72\xba\x24\x17\x02\x15\xb8\x7e\xcf\xe6\xd7\x59\x6c\x92\xa9\xf5\xf3\x54\x64\x8a\x3d\xa9\x3b\x27\xb8\x3d\x25\xbb\xcb\xa8\x24\x0e\xd0\xe3\x43\x44\x18\x11\x5b\x27\x1b\x3e\x35\xde\xfd\x1b\x7e\x37\xe4\xc6\x8e\x73\x63\x3b\xbd\xd8\xbb\xf8\x79\x9d\xc9\x1a\xfd\xf6\x2a\x82\xfe\xf9\xd4\x78\x0e\xc8\x9b\x2e\xbf\x74\xd7\x13\x01\xe5\xc9\xec\x57\xed\xb9\xb3\xc2\x3b\x7c\x9a\x47\x03\x42\x6e\xb0\x1d\x65\x5a\xf7\x78\x3a\x20\x00\xb5\x7a\x4e\xef\xbe\xdc\x6d\xdf\x34\xaf\xef\xbd\xb0\x47\xce\x4e\xa1\x24\x8c\x5d\x72\xff\x11\x2f\x10\x78\x1a\x3e\xf4\x1d\x82\x00\xd0\xf6\x97\x09\xf6\x7c\x8d\x20\x44\xa7\xca\xfb\x04\x3f\xd8\x89\x73\x89\xe6\x11\x21\xd9\x0b\xb9\x57\x88\xf0\x66\x45\x6c\x91\x1c\x24\x7f\x5d\xe4\x75\x20\xb9\x85\x85\xdb\xd2\xcf\xe1\x98\xb7\x0c\x1c\xa9\x5b\x92\xd1\x1d\x77\xfd\x01\xa5\xd5\x27\xa5\xd7\x03\xf2\xeb\x1d\xd7\xf6\xa1\x17\xb2\xe1\x6c\x65\x64\x7c\x6e\x13\xb6\x3c\xe3\x0b\x20\x23\x9e\xe2\xae\x72\x1c\x75\x23\x3e\x98\x72\x5f\x5e\xe5\xae\xc7\xcd\x3a\xb2\xec\x6b\x2d\x77\x4b\xc3\x0e\x28\xd5\x63\x56\xdd\x1c\x73\xcf\x3a\x06\xf3\x1c\x78\x29\x69\x34\x44\x7f\xef\xd9\x6b\x1a\x32\x76\xab\x2f\xa9\xa9\x31\x13\x00\xbd\x07\xd0\x5c\x84\x79\xcd\xed\x03\x5e\xa7\x36\xbc\x85\xf7\xa9\x40\xe9\x0a\x39\x57\x03\x1a\x21\x57\x7c\xee\xd2\xcb\x36\xe5\x74\xc9\x82\x8a\x03\x17\x42\xd3\xb6\x78\xe8\x6e\xc1\xa0\x5b\xee\xf9\xf2\xc8\x5a\xe3\x36\x26\x57\x8d\xb3\x55\xf7\xb2\x8f\x5b\x85\x44\xd4\xf3\xd2\xf4\x1c\x08\x1b\xda\x0a\xbf\xd1\x54\xb4\x7e\x31\x87\x71\x28\xa4\xb6\x47\x84\x2b\x8f\x66\x69\x24\x1b\xab\xaa\x57\x91\x35\x01\xa8\x86\x42\xde\x2f\xc2\xb6\x21\xa3\xca\xc8\x6c\x2f\xe7\x8c\x51\x6e\xcf\x45\xb9\x89\x17\x0a\x7e\x18\x33\xf8\xc3\x95\x33\x81\x80\x40\xc5\xd4\xfa\xa3\x2c\xad\x16\x25\x57\x37\x10\x03\xd1\xbf\xee\x0b\xb3\xdd\xf1\xf2\xdd\xb1\xaf\xdd\xd1\xaf\xc3\xa2\xe6\x7b\xc4\xcd\x1f\x33\x72\xfe\xa0\xd8\x79\x0b\xc4\xf6\x78\xf6\x3e\xd1\xf3\x
16\xa0\x39\x8c\xa3\xe3\xe7\x2d\x80\xff\xfa\x80\x08\x7a\x0b\x48\xf3\xf9\xf8\x18\x7a\xeb\x44\x1f\x12\x45\x6f\x01\x5a\x92\x3c\xa1\x6b\x93\xad\x91\xf4\x47\xd9\x28\x6d\xf1\xf4\x47\x8a\xa8\x1f\x10\x53\xdf\x3b\xaa\xde\x19\x57\x7f\x14\xa4\xb5\x47\xd7\x1f\x18\x5f\x6f\xc1\x99\x89\xba\x3f\x38\xc2\xbe\x13\xa1\xf5\x37\x62\xf7\x7b\xa3\xa2\xf6\x1e\xec\x43\x9f\x3a\x21\x07\xbe\x15\x5b\xf5\x7d\xec\x7a\xe6\xa4\x78\xe8\xd5\x3a\x28\x7f\x3f\xfa\x99\x93\xff\x0b\x00\x00\xff\xff\xf4\x9f\x1e\x90\xbb\xa1\x00\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x3d\xfd\x6f\x1b\x37\xb2\xbf\xe7\xaf\x18\x34\x38\x24\x39\x48\x8a\xd3\x5e\x8b\x3e\x17\x0f\x78\xaa\xa3\x34\xc2\x39\xb2\x21\xcb\x29\x7a\xb9\xc2\xa2\x76\x47\x12\x5f\x56\xe4\x96\xe4\xda\x56\x1f\xde\xff\xfe\xc0\xaf\xfd\xe4\xae\x3e\x1c\xf7\xd2\x77\x31\x70\xbd\xd8\x4b\x0e\xc9\x99\xe1\xcc\x70\x66\x38\x7c\x0a\x67\x3c\xdd\x0a\xba\x5a\x2b\xf8\xfa\xe4\xeb\xaf\xe1\x27\xce\x57\x09\xc2\xf9\xf9\xd9\x00\x86\x49\x02\x53\xfd\x49\xc2\x14\x25\x8a\x5b\x8c\x07\x4f\x9e\xc2\x93\xa7\x70\x4e\x23\x64\x12\x63\xc8\x58\x8c\x02\xd4\x1a\x61\x98\x92\x68\x8d\xfe\x4b\x0f\xde\xa3\x90\x94\x33\xf8\x7a\x70\x02\xcf\x75\x83\xaf\xdc\xa7\xaf\x5e\xfc\xf0\xe4\x29\x6c\x79\x06\x1b\xb2\x05\xc6\x15\x64\x12\x41\xad\xa9\x84\x25\x4d\x10\xf0\x3e\xc2\x54\x01\x65\x10\xf1\x4d\x9a\x50\xc2\x22\x84\x3b\xaa\xd6\x66\x18\x07\x44\xcf\xe3\x17\x07\x82\x2f\x14\xa1\x0c\x08\x44\x3c\xdd\x02\x5f\x96\xdb\x01\x51\x76\xc6\xfa\x67\xad\x54\x7a\xfa\xf2\xe5\xdd\xdd\xdd\x80\x98\xd9\x0e\xb8\x58\xbd\x4c\x6c\x4b\xf9\xf2\x7c\x7c\x36\x9a\x5c\x8d\xfa\x5f\x0f\x4e\x6c\x9f\x6b\x96\xa0\x94\x20\xf0\xb7\x8c\x0a\x8c\x61\xb1\x05\x92\xa6\x09\x8d\xc8\x22\x41\x48\xc8\x1d\x70\x01\x64\x25\x10\x63\x50\x5c\xcf\xf8\x4e\x50\x45\xd9\xaa\x07\x92\x2f\xd5\x1d\x11\xf8\xe4\x29\xc4\x54\x2a\x41\x17\x99\xaa\xa0\xcb\xcf\x8f\xca\x4a\x03\xce\x80\x30\xf8\x6a\x78\x05\xe3\xab\xaf\xe0\xc7\xe1\xd5\xf8\xaa\xf7\xe4\x29\xfc\x3c\x9e\xbd\xbd\xb8\x9e\xc1\xcf\xc3\xe9\x74\x38\x99\x8d\x47\x57\x70\x31\x85\xb3\x8b\xc9\xeb\xf1\x6c\x7c\x31\
xb9\x82\x8b\x37\x30\x9c\xfc\x02\x7f\x1f\x4f\x5e\xf7\x00\xa9\x5a\xa3\x00\xbc\x4f\x85\x9e\x3f\x17\x40\x35\x22\x2d\xf5\xae\x10\x2b\x13\x58\x72\x3b\x21\x99\x62\x44\x97\x34\x82\x84\xb0\x55\x46\x56\x08\x2b\x7e\x8b\x82\x51\xb6\x82\x14\xc5\x86\x4a\x4d\x4e\x09\x84\xc5\x4f\x9e\x42\x42\x37\x54\x11\x65\xfe\xd2\x58\xd4\xe0\x09\x65\x4b\x7e\xfa\x04\x40\x51\x95\xe0\x29\x9c\xf1\x4d\x9a\x29\x7c\x39\x66\x52\x69\x72\xfe\x24\x78\x96\xbe\x23\x8c\xac\x50\x3c\x01\x88\x51\x46\x82\xa6\x1a\xda\x29\xcc\xd6\xe8\xdb\x43\xa8\x3d\x08\x94\x3c\x13\x11\x3e\x01\xb8\xef\xc7\x51\xd2\x97\x4a\x64\x91\xea\x33\xb2\xc1\xd3\x60\x97\xbc\xe5\x9a\xc8\x3e\x25\x9b\x53\x58\x92\x44\xe2\x93\x94\xa8\xb5\xd4\xd3\x5c\xa1\xd2\xff\x17\x98\xc9\x32\x63\x91\xfe\x4d\xf3\xa8\x21\xf2\x0a\x35\x6b\x2e\xb9\xd8\x98\xd5\x03\x59\xf0\x4c\x01\x69\x1b\x17\x20\x25\x82\x6c\x50\xa1\x90\x76\x88\x3e\x74\x4e\x54\xff\x78\x86\x3b\x05\x25\x32\x74\x7f\xac\x4c\x6d\x08\xcb\x2c\x49\x80\x3a\x18\x9a\xe9\x5b\xa7\xa0\x59\x76\xbb\xef\xf2\x4c\xe3\x3f\xd9\x02\x63\x4c\x50\xe1\xbe\x2b\xb4\xad\xff\x04\x0b\x1a\x26\xc9\xa1\x6b\x4a\x92\xc3\x57\x95\x0a\xfe\xdf\x18\xa9\xae\x85\xc8\x68\x8d\x1b\x72\xea\x7e\x03\x50\xdb\x14\x4f\x41\x0b\x2d\xb6\xaa\xc0\x4a\x78\x64\x98\xe6\x01\xc0\x12\x2a\xf7\xde\x8b\xba\x6d\x80\x57\x37\x84\x6d\xff\xe4\x78\xd0\x9a\x8f\x33\x64\xca\x4c\xd2\x36\x75\xf3\x0d\x2d\xcc\x43\x71\xe2\xb6\x83\x51\xad\x1c\xf4\xf3\x93\xbe\x63\x1f\x7e\xe7\x0c\xf3\x5f\x04\xae\x8a\xd9\xdb\x2e\x29\x11\xc8\x54\x3f\xe2\x4c\x2b\x5b\x14\x75\x84\x15\x02\x36\x12\x48\x14\x56\xd6\x1a\x94\xbe\xe5\x4f\x02\x49\xdc\x57\x74\x83\x3c\x53\xa7\x70\x52\xf9\x66\x44\x52\xdb\x47\xcb\xf9\xcd\xaf\x16\x9b\x7c\x11\x22\x69\xbe\x4c\x4d\xac\xfc\x17\x45\xc4\x0a\xd5\x15\xfd\xbd\xf8\x53\x75\x85\xa9\xe0\x29\x0a\x45\x51\x16\x44\x23\x99\xe2\x6f\x91\x24\x94\xad\x2e\x79\x42\xa3\xca\x47\x3f\x0b\x22\x04\xd9\x96\xfe\x6a\x67\xbe\xe2\x4e\x63\x0d\x9b\x30\x4a\x8d\x1b\xbb\x40\x0f\xb9\xb6\xcd\x21\xd5\xed\xb7\x4e\x85\x53\xa9\x39\x9f\xac\x30\x2e\xa4\xcc\x4a\x73\xc0\x40\x5b\x4a\x25\x90\x00\x11\x61\x4e\xdf\x6f\x81\xb3\
x44\xff\x07\xe1\x96\x24\x19\x0e\x1a\xf3\x94\xc8\xe2\x3e\x6e\x52\xb5\xad\x90\xb4\xc4\x4c\x54\xaa\xbe\x5d\xa9\xfe\x67\xe9\x3b\x55\xb8\xa9\xe0\x23\x48\x97\x1a\x52\x6c\x8b\x10\x0b\x77\x23\x2a\x4c\x20\xfb\xa3\xf1\xa5\xd6\x67\x6b\x8c\x3e\xd6\x3f\x05\x76\x71\x70\x5a\x96\x56\x6f\x0b\x40\x8d\xa6\x0d\x4a\x5d\x4f\xcf\x73\xeb\xca\xce\x00\x22\xdd\x13\xd4\x9a\x28\x90\x74\xc5\x48\x22\xcb\xf4\x1c\xb4\x0c\x2f\x70\x89\x02\x59\xd4\x5c\x98\xdd\xac\xd6\x1e\x2a\xcc\xac\xae\x59\x02\x2c\x29\x26\xf1\x29\x48\x4c\x96\xe7\x94\xd5\x5b\x50\x46\x15\x25\xc9\x6b\x4c\xc8\xf6\x0a\xa3\x36\x74\x51\xa6\xb0\x90\x2a\x25\xe0\x46\x16\x9b\xef\xdf\xfd\x6d\x17\x36\xc7\xd5\xb1\x76\x63\x94\x65\x9b\x05\x0a\xad\x3b\x25\x46\x9c\xc5\xd2\xa2\x52\x23\x38\xcc\xfa\x81\xe5\xdf\x11\xaa\x24\x2c\x70\xc9\x05\x02\x55\xd6\xa2\x47\xd9\xdc\x57\xfa\x8f\x8a\x03\xc3\xbb\x1c\xa8\xb6\xa5\x03\x20\x05\x46\xc8\x54\xb2\xd5\xff\x30\xc2\xaf\x98\x86\x1c\xc0\x4c\xef\x4c\x87\x56\xad\xa9\xc9\x56\x2b\x6a\x7e\x27\x8b\x46\x01\x98\xe6\x3c\x61\xfa\xd0\xdf\x51\x1b\xdd\x20\x32\xa6\x57\x4a\x05\x48\x45\x84\xca\x52\xb0\xd8\xc9\x17\xa3\xd1\xe0\x41\x06\x20\x1a\x84\x40\xac\x55\xdf\x86\x32\x2c\x70\xb7\x05\x22\x10\xae\x27\x6f\x47\xc3\xf3\xd9\xdb\x5f\xdc\x8c\x53\x81\xb7\x5a\x01\x95\xb1\x1b\x80\x5a\xc5\x37\x2c\x05\xdf\x78\x34\x68\x4c\x6a\x5c\x17\xd8\x4b\x05\x6e\x88\xca\x04\x26\x5b\x37\x8a\x11\x3a\x01\xb0\x9b\x4c\x2a\x58\xa0\x83\x47\xd8\x0a\xe1\xc3\x49\x0f\xbe\xf9\xee\xe4\xe4\xd7\x62\x9b\x2c\x88\x44\x2f\x29\x26\x9a\xa5\x1a\xb2\xb7\xb1\xad\x6b\x2c\xf8\x63\x0d\x42\x97\xe4\xd5\xa3\x15\xeb\xd5\xfd\x35\x95\x32\x77\x86\x2a\x56\x49\x99\x95\xc6\x56\xf8\x56\x16\xa7\xc1\x98\x25\xe7\x0b\x7c\xd5\xff\xf6\x7b\x88\xd6\x44\x90\x48\xdb\x24\x90\x70\xb6\x1a\xe4\xe2\x4f\x1a\xd2\xe8\xa1\xfc\xe9\x13\x59\x5c\x97\x53\x04\xd6\xdb\x74\x8d\xcc\x70\x09\xd1\xd8\x8a\xf9\x06\x96\x3c\x13\xfd\x1c\xb0\x43\x84\x9e\xb0\x0a\x2e\xa5\x39\xcf\xc0\x72\xcd\xa4\xcd\xa9\x7c\x6b\x4f\xe4\x1f\xa6\x6f\xce\x5e\x9d\x7c\xf3\xed\xaf\xcf\xf5\xd9\x5a\xba\xc3\x35\x45\xb5\x34\x47\x6b\xb1\x8c\xf4\xff\x74\x8b\x81\xba\
x57\x2f\x8a\x41\x2c\x83\x70\x36\xa3\x1b\x94\x8a\x6c\xd2\xc3\x49\x77\x56\x07\x51\x6a\xaa\xed\x89\x0b\x96\x34\x55\x56\x83\xa8\x7e\x22\xa0\x3c\x98\x5d\xea\x54\xd3\xf7\x9f\x7a\xe1\xdf\x7c\xf3\xcd\x7f\xfc\xb3\x7b\xe5\xba\x89\x59\x79\x55\x09\xe2\xbd\x72\x92\xb2\xaa\x6f\x3f\x66\x0b\x14\x0c\x15\xca\x3e\xdd\x6c\x32\x45\x16\x09\xd6\x96\x10\x65\x42\x1b\x62\xc3\xa8\x62\xbf\x41\xbb\x6a\xad\x63\xad\xd2\x3f\xd4\xae\x5d\x01\xb7\xf6\xdd\x0f\xdd\xcf\x3e\x5c\x64\x2a\xcd\x14\xe8\xa6\xbf\x1a\xec\x1b\x0b\x9e\x2f\x0b\x0c\x13\x0b\xdb\xb0\xb2\xca\x25\x7e\x05\x7b\xa5\xe6\xc5\x5e\x6b\x21\x95\x11\x71\x7a\x0b\x69\x13\x3a\xce\x12\x8c\x0d\x75\x91\x44\xeb\x3a\x4c\xb5\xe6\x32\x1f\x7f\xf0\xec\x50\xba\xb4\x9b\x1f\x64\xa1\x37\x24\xa3\x6c\x55\x57\xa6\x5d\xaa\xb4\x4b\x91\xd6\x0d\xc8\x7c\x80\x5a\xbb\x56\xb2\xec\x43\x1a\xc5\x15\x49\x4a\x1a\xb7\x86\xf3\x76\x9d\xb0\x9b\x04\x8a\x6b\xc9\xe7\xf0\x82\xf1\xa0\xb4\x02\x20\xac\x5d\x87\x09\xdc\xf0\x5b\x3d\x01\x65\xf5\x42\xbb\xde\x37\xd2\x49\x9f\x05\xcd\xf9\xc0\x2a\xa2\x0a\x4d\x0f\xa0\xab\x17\x56\x8f\x46\xbf\x33\x07\xfe\x53\x52\x6f\x37\xdd\xda\xd1\xbc\x83\x6e\xde\xc4\xe1\xc2\x7c\x76\x02\x29\xd9\xc2\x02\x35\xa2\xdd\xe7\xa6\x2d\x3b\xb6\x8e\x59\x67\x27\x10\x9a\x18\xf3\xca\x36\x07\x7d\x64\xb7\x9e\xdb\x92\xd2\x91\x3d\x4d\x6a\x25\x8c\x79\xb6\x22\x94\x35\x60\x66\x4c\xd1\x44\x37\xb2\x60\x64\xc5\x06\x02\x99\x45\x11\x4a\xb9\xcc\x12\x6d\x6f\x8c\x97\xc6\xe9\xbc\x26\xb7\x08\x31\x95\x9a\xd6\x71\x2e\xfe\x03\xbc\x66\xc6\xed\x79\xc7\x34\x26\x31\xdc\xd1\x24\x31\x0e\xeb\x05\x42\xca\xd3\x2c\xd1\x0b\xfd\xc1\x8c\x87\x24\xee\x99\xc1\xe7\x9e\x59\x7e\xb6\x1c\x38\xb5\x70\xe6\x4d\x4b\xb9\x00\x59\x06\xf7\x70\x2e\xad\x0e\xfc\xc8\x3c\x5b\x1d\xec\xb1\x39\xb8\x6e\xed\xef\x92\x3d\x06\xbb\x44\x29\x7d\x7c\x2d\x98\x6d\x60\xe0\xdb\x16\xee\x63\x89\x15\x1b\x30\xb5\xb2\x28\xf9\xed\xec\x71\x39\xc2\xc1\xfe\x0c\xdd\x3a\x4d\xcb\xe0\xb1\x31\x97\xa5\xe3\x5e\x03\xee\xd9\x33\x09\xf3\xc2\x23\x31\x77\x46\x23\x89\x22\x2e\xb4\xfd\x97\x6c\x8f\xe7\x13\x2f\x12\x1f\x89\x33\x5e\x3b\xf0\x7f\x12\x69\x66\x1d\x48\xad\xd2\xcc\
x7d\x3e\x1e\xdb\x8c\x33\x7c\x24\x4c\x4f\x0a\x97\xdd\xe7\x86\x65\x91\x31\xa7\xcd\x63\x2b\x70\x19\x2f\x61\x3e\x64\x64\x1d\x84\xd4\xe2\x80\xf9\x48\xa8\x9d\xe6\x03\x7c\xa6\x08\xae\xb3\x71\xe1\x79\x68\x61\x64\xf3\xdf\x80\x96\xf3\xea\xba\xb4\xe4\xb2\x0d\xe6\x36\x80\x15\x4d\x78\x4f\xa5\x69\x20\x38\x57\x90\xa2\x90\x54\x2a\x64\xaa\xe9\xb8\xa1\xf2\xa3\xa1\xbd\x57\xcb\xc4\xb8\x51\xcc\x9f\x73\xcb\x8d\x6e\xc8\x0a\xed\x8a\xa8\x84\x18\x97\x94\x19\x04\xe8\x8f\xed\x82\x5d\x8b\x6b\xad\x28\x1f\xc2\x3c\x4b\x81\x72\xfd\x98\xcc\xe3\x07\xf8\x4c\x99\xc7\x72\x84\xc0\x88\xb3\x25\x5d\x65\x02\x63\x7b\x9a\x2f\x4e\x30\xb6\x6d\xcc\x8d\xb5\xe3\x1c\xe7\x0d\xa8\x04\x04\x1a\x57\x94\xe1\xb9\x9c\x97\xdc\xf6\x1e\xc0\x1b\x7d\xd2\xba\x27\x9b\x34\xc1\x1e\x48\x54\x86\x75\xb8\xb0\x66\xbc\x71\x46\x18\x05\xd7\x80\x9b\x72\x9e\xc8\xdc\x73\xea\x17\xf5\x10\x82\x9b\x59\x3e\x26\xc1\xfd\x00\x9f\x29\xc1\x9b\xd2\xc2\x4c\xb8\x5d\x5a\xe4\x0d\x8e\xc7\xfa\x2d\x0a\xba\xdc\x3e\x1e\xd2\xdf\x7b\xf8\x9f\x29\xce\x2d\x22\x0d\x16\xa8\x96\xaf\x3e\xef\x62\xee\x00\xe5\x2e\xbe\x0f\xbf\x0e\x2a\x4e\x9d\xe6\x51\xc1\xed\xcb\xad\x9f\xca\x3c\xa1\x52\xbd\xab\x81\x99\xc3\x06\xd5\x9a\xc7\x10\xf3\x28\xdb\x20\xb3\xd9\x19\xc7\xd1\xaf\x8c\xa6\x83\xfd\x72\xaf\x8b\xce\x6d\x9e\xa0\x21\x03\x6e\xfe\x69\x3c\xe2\xf9\x07\x6b\x30\x53\x99\x87\x34\x0e\xf6\x8e\xe5\xa9\x34\x94\x33\x13\x23\xda\x1e\xee\x21\x7b\xdd\x80\x71\x98\x97\xac\xb3\x7f\x05\x0d\xf6\xab\x8f\xc4\x19\x89\x68\x04\x9e\x42\x16\x63\x5c\x59\x8c\x46\x4d\x9d\x03\xab\xc7\x2d\x12\x09\x2e\xa5\x89\xe3\x1a\x9e\x25\x2e\x8a\x4b\x92\xb6\xb0\xe0\x1e\xbe\x2b\x77\x04\x59\x93\xb4\xc5\x88\x0d\x86\xcd\x6a\x08\x9d\x15\x40\xda\x1a\x1e\x82\xcd\x12\xb8\x11\xcb\x36\x9d\x3b\x5c\xef\xe9\x0a\x1a\xa5\xee\xa6\xa5\xe0\xdd\x9a\x46\xeb\xd2\xf9\x2d\xe2\xec\x16\xc5\x0a\xa5\xcb\x9e\x0a\x6d\x42\xad\xd8\x6e\x51\x9f\xff\x04\x70\xa6\xb9\x94\xfe\x8e\xe0\xc2\x24\xcf\x63\x74\xce\x79\xfd\x4d\xe5\x8e\x7e\x69\x12\x86\x60\x9e\xa5\x31\x51\x68\x97\x30\xf0\x94\x98\x62\x79\x76\xb3\x6d\x8a\xf3\x17\x4d\xcf\xc9\x25\x97\x92\x2e\x12\x07\x51\x9e\xc2\
x6c\x38\xfd\x69\x34\xbb\xb9\x7a\x3b\xbc\x1c\xdd\x5c\x4f\xae\x2e\x47\x67\xe3\x37\xe3\xd1\xeb\x1e\x0c\x27\xbf\xf4\xe0\xc7\xe1\xf9\x70\x72\xe6\x7e\xbd\xb9\x1a\x4f\x7e\x3a\x1f\xdd\xfc\xe3\x62\x32\xaa\x0b\x03\x64\xd9\xa6\x4e\xd7\x7e\x2b\xf4\x46\xc3\xe1\xe4\x97\xc6\xdf\xfc\xd8\xa1\xc6\xe5\xa9\x54\xbe\x1b\xa6\x0d\x33\x58\x3d\x78\x0e\x4d\xfe\xfa\x87\xee\xdd\xc5\x06\xa6\x01\xdc\xad\xd1\x05\xc9\x76\x6c\x0c\xeb\x36\x68\xf1\x06\x68\xab\xd6\x76\xab\xc6\xb6\xea\x54\xdb\x5b\x4f\xee\x11\x67\x87\x3d\x62\xed\x10\x8e\xb7\x43\x57\xcc\x1d\x8e\xdd\x82\x21\x8c\x77\xc5\xdf\xc1\xd1\x38\xf4\xf7\x1d\x01\x78\x08\xd3\x3b\xd8\x2e\x18\x81\x77\xf9\xa0\x1f\xf4\xf8\xbf\x3e\x7f\x19\xb9\x18\x79\xcc\x23\xf9\xd2\x32\x82\xec\x1b\xfe\x7b\xf9\x94\xdc\x12\x9a\x68\x32\x05\xf6\xa0\xfd\xd1\x40\x75\x63\x1b\x09\x33\xe7\x21\xaf\x94\x2d\xac\x12\x97\xed\x34\x18\xec\x8f\x8b\x29\x49\x9b\x34\x14\x72\x9c\xc2\xbe\xec\xb4\x24\x34\xe1\xb7\x28\xac\x1d\x71\xb8\xe6\x7e\x53\xe9\x7f\x98\xd6\xab\xf6\xad\x09\xe6\xa6\x50\xb6\x67\x04\x2d\x88\x53\x14\xda\xea\x33\xf9\xbe\x44\x9a\xc4\x38\x83\x61\xbd\x98\x4c\xe0\xc0\x58\x67\x45\x82\x4a\x05\x3b\x54\x82\xcc\xd2\x94\x6b\x4b\xb5\x07\xf3\xc9\xc5\xcd\x9b\xe1\xf8\xfc\xe2\xfd\x68\x3a\xb7\x4e\xbe\x18\x97\x24\x4b\xcc\x01\xb3\xf6\xb5\x2e\x53\x2b\x70\xaf\x27\x7f\x9f\x5c\xfc\x3c\xe9\x41\xa9\x4f\x59\x76\xd6\xe5\x66\xdf\xf7\xa8\xfc\xad\xd4\xb9\x20\x11\x65\x2b\x14\xa9\xa0\x4c\x1d\x41\x9f\xa2\x73\xa9\xd1\x7e\xc1\xb7\x52\xdf\xa6\x99\x65\x63\xf1\xd6\x23\xbd\x21\xfa\x10\x60\x73\xec\xa8\xb5\xd3\x36\xfa\xd8\x5f\x4d\xcf\x48\x78\xf4\x91\x9a\x48\xb5\xca\x7d\xd8\x74\xc5\xb8\x39\x4b\xae\xd1\x38\x10\x50\x78\x6f\x42\x88\x61\x06\x30\xac\x7a\xdd\xb3\xb4\xaf\x78\x5f\x2b\xc9\x32\x96\xf2\x40\x79\x2a\xf8\x2d\x8d\xdd\xa4\x84\xc9\x38\xe6\x60\x95\xaa\xd9\x6e\xa1\x31\x7a\x95\x01\xb8\xd6\xeb\x77\x54\x7a\x25\xf0\x5b\x86\xd2\xcd\x5e\x33\x9b\x3d\x03\xa3\x10\x5c\xc0\xfc\x6f\xaf\xbe\xd6\x26\x41\x4c\x35\xf6\x26\x5c\xbd\x43\xa5\x59\x8a\x83\xc4\x2a\x0b\x6a\x50\x09\x51\x1a\x52\x69\xd6\x3d\xd8\x90\x8f\x08\x04\xe6\x2b\x54\xcf\x5f\xcc\
xf3\xd1\x14\x77\x91\x85\x5b\x6c\x45\xcc\xa1\x46\x2f\x8d\x9b\xac\xd4\x3c\x5e\xb5\x1d\xad\xea\x39\x39\xf1\xc1\xac\x55\x3b\x47\x0d\x21\x63\xf4\xb7\x0c\x81\xc6\xc8\x94\x3e\xfd\x88\x22\xa4\xee\x59\xce\x4c\xb3\x99\x73\x60\x2e\x19\x08\x58\x21\x43\xe1\xe2\x39\x54\x96\x00\x1d\x1e\x97\xa5\x65\x0c\x1f\xbe\xe5\x2a\x04\x7a\x28\x66\x6a\x2a\xc9\xc3\x06\x03\xbc\xd8\x8d\x47\xc4\x9e\xbb\xf2\xc4\x42\x19\x62\x6d\xcb\x6a\xcf\x0e\xf3\x78\x9c\x39\xff\xdb\xf1\xa8\xf4\x10\xda\x90\x56\xc3\x52\xc3\xf3\x97\xbb\x0c\xdd\x8d\x05\x17\xd3\xd7\x9c\x52\x59\x49\x5b\x32\x64\x11\xfc\xc9\xa4\xe7\xb0\x02\x76\x11\xc0\x49\x12\x60\x78\x57\xd5\x37\xbb\x9c\x02\xe5\x31\x3c\x4c\xeb\xca\xca\x7d\xa7\xe1\xd3\x9b\x83\xe7\x72\xb4\xac\xdf\x2d\x5a\x9b\xbc\xa7\xcc\xde\x42\xd9\xf2\xcc\xa4\x80\xcd\xbd\x9f\xad\x38\xf9\xf7\xec\x07\x93\x28\x7b\x6d\x24\xa2\x9c\xf1\xd2\xe7\xaa\x14\x14\xe6\x48\x52\x84\x9a\x64\xed\x6c\xa2\xa9\x39\xd7\x88\x98\x5f\x4e\x2f\x86\x67\xb3\xf1\xfb\xd1\xbc\x99\x16\x1a\x71\xb6\x4c\x68\xa4\x6a\xac\x76\x6b\x6f\xfd\x34\x13\x4b\xec\xce\xee\x3b\x85\xfc\xc9\x18\x38\xc0\x4c\xed\x3c\xec\xd3\x9e\x0f\xe7\xdd\xf3\x6a\x42\x77\x90\x67\x3d\xf4\x87\x7b\x31\xd8\x51\xa9\x6c\xbb\xd2\xd7\x4c\x06\x97\xdb\x54\x5d\x7c\x5b\x64\x7a\x99\xf4\xb4\xef\xbe\xa9\xe0\xb6\x96\xaa\xd6\xb3\x6e\xfe\x4f\x91\x12\x76\x00\x72\xe2\x4b\x2e\xd4\xe1\x99\xd6\x93\xbc\x6b\x1b\x9e\x4c\x0b\xd0\x06\xa5\x84\x92\x77\xdc\xfb\xa2\xab\x12\x5b\xda\x75\xa3\x71\xb6\x89\xea\x21\xd5\xe4\xd9\x51\x59\x97\xf1\xc7\xaa\xf8\xcf\x2f\x19\x3b\x88\xcb\xae\x33\x60\x9d\xa9\xcb\x53\xd8\x2f\xf9\xba\xc6\xe0\x01\xf2\xe5\xec\x9b\xdb\x1b\x2c\x27\x68\x0b\x73\x97\x18\x3a\x70\xf0\xfa\xe4\x2c\x7e\x10\xcd\x0d\x42\xb9\x50\x8f\x9a\x84\xad\x69\xb8\x1b\xab\x7a\x1a\xce\x3b\xde\x73\xbe\xb3\x88\x30\x93\xc6\xe5\xfc\x5c\x0b\x54\x77\x88\x0c\x5e\x19\x74\x7d\xf7\xed\xb7\xdf\x7c\x7b\xe4\xc2\xdd\xb5\x8b\xc3\x25\xe0\x65\xe5\xbe\x46\x78\x1d\xb6\x49\xbe\xa3\x4b\x97\x09\x0f\x24\xce\xde\x1a\x2b\xe1\x59\xec\x7f\xb5\x72\x57\xbc\x6c\xce\x34\xd7\x5a\xac\xcc\xe5\xd6\xa9\x70\x38\x26\xa6\xe5\x0b\x3c\xf0\x89\xec\
xd5\x0f\x76\x36\x7b\x3b\x51\x2a\xab\x6b\xf7\x8b\x38\xa3\x47\xa0\xa4\x31\x4a\x78\xbe\x34\x21\x42\xe7\xa1\xf3\x88\x93\x2f\x0e\x37\x8b\xbd\xfe\x3f\x1c\x7d\x57\xcd\xbb\x11\x0f\x42\xe0\xce\x0b\x3a\xb3\x5a\xc8\xdb\x9d\x83\x6c\x50\xdc\xd9\xa8\xd7\xd3\xf3\x63\x70\x20\x6e\x69\x84\xc3\x28\xe2\xd9\x31\x2e\x87\xab\x4a\xff\xae\x9d\xe5\x46\x32\x39\x4a\x19\x53\x2e\xd2\x68\xdc\x08\x44\x6a\xa3\xda\x1c\xe2\x88\x8b\xec\x6a\xeb\x5a\x6b\x0a\x52\x4f\x25\x06\xef\x10\xb2\x19\xef\x1d\x0c\xc3\x59\xf5\x96\x45\x63\x0a\x55\xb0\x0c\x31\x96\x66\xdc\xf2\x6d\xe6\xfc\x6e\x77\x39\x73\x2b\xf6\x17\x2a\x4b\xf0\x7f\xdc\x7a\x67\x52\xaf\x71\xfe\xaf\x2f\xfd\x7f\x9c\x94\x99\x18\x79\xf9\xbf\xff\x15\x69\x11\xe0\x1a\xc9\xc1\xca\xfd\xcb\xb5\x1e\x44\x7c\x53\x77\x6a\x69\xac\x35\x4d\xee\xdd\xb2\x66\x4c\x36\x2f\x5b\x29\x96\x4b\x18\xdc\x10\x9a\x14\x1c\xa2\x88\xc2\x65\x96\x1c\x1b\x2e\xbb\xaa\xf4\x3f\xcc\x69\xd8\xda\xb7\xc2\x5d\xbe\x55\x6e\x91\x59\x33\x3b\xdf\x54\x1e\x72\x5c\x35\xb2\x4a\xe0\xda\xac\x92\x54\xb8\xb2\x06\x66\x88\x70\x00\x22\xe8\x37\x6f\x28\x9e\x32\x9c\xb6\xb6\xfb\xe2\xa1\x13\x5a\x97\x93\x3d\xa6\xf2\x63\xd0\xf7\xde\x19\x01\x00\x20\xb1\xf5\x73\x91\xe4\xb2\xd3\x85\xbf\x13\xd0\x83\x17\xfb\x5a\xaf\x20\x08\xb8\x3b\xb8\x00\xee\x2e\xe6\xeb\xd2\x55\xf0\xf6\xf9\x77\x84\x1a\x20\x7c\x3f\xd3\xc2\xdd\xdd\xe3\x01\xab\x2e\x86\x09\x44\x35\x8b\x9f\x86\x2b\x5d\x62\xbe\x85\x2d\x03\xb8\x34\x65\xd4\x0a\xa4\x94\xe0\x18\x67\x9d\x8b\x86\xf2\x05\xb8\x9e\x77\xac\x72\x01\xef\xdf\x15\x82\x37\x4f\xf1\x29\x44\x77\x5b\xa4\x04\x8c\x47\x8f\x4a\x58\x26\x64\xe5\x65\x9a\x91\xb2\xfe\x58\x05\xd4\x9a\x15\x26\x2b\x4c\xae\x79\x96\xc4\xa5\xf9\x76\x4d\x74\xa9\x50\x00\x35\x3e\x20\xc6\x8d\x8d\x8e\xc2\xc2\x77\x1a\xc3\x28\x88\x1e\xe0\x60\x35\xb0\xde\x68\xf3\x47\x7a\x8b\xcd\x3c\xef\xe2\xa7\x48\xf1\xb5\xa6\xe1\xdd\x9a\x27\x58\x84\x67\x7c\x26\x28\x4c\xb8\xc2\x53\x87\x6a\xa2\x14\x89\xd6\x9d\xb3\xa5\x0c\xa6\xa3\xe1\xeb\x9b\x8b\xc9\xf9\x2f\xb0\xe1\x31\x6a\xc3\xd9\x25\x91\x6b\x94\xf7\x73\xc0\x9d\x21\x89\xea\xcf\x64\xf4\x7e\x34\xed\xc1\xc5\xe4\xe6\x72\x34\x7d\
x37\x9c\x8c\x26\xb3\x9b\xf1\xe4\x6a\x36\x9c\x9c\x8d\x6e\x5e\x8f\xce\x47\xb3\xf1\xc5\xa4\x1e\xed\x2d\x7e\x42\x71\xdf\xe2\xa7\x6f\xc1\x77\x7c\xef\x1e\x37\xd0\xb1\x99\x62\x11\xdc\xe6\x15\xe6\x36\x8d\x8a\xdb\x06\xac\xe2\x0d\x74\x19\x6a\x79\xfa\xbc\xdf\x4f\xc1\x49\xf3\x46\x3e\xa3\x67\xf0\x1e\xa0\x8a\x5c\xf8\x63\x43\x0c\x9d\x3f\xe2\xd6\xe7\xc3\x19\xee\x44\xad\x4d\x83\x60\xf5\x5a\xa4\x37\x8e\x0d\x3f\x34\x37\x03\xde\x2b\x14\x8c\x24\xe3\xf4\xcf\x2d\x98\x47\xc5\x3a\xfe\xed\xc4\x73\x69\xed\x9f\x42\x48\x8f\x2f\xeb\x22\x5a\x60\x82\x44\x7e\xce\x32\x7a\x7c\x09\x16\x25\xc4\x65\xd1\x68\x69\xdd\x01\xb5\xa4\x77\x0e\x92\xd6\x1d\x20\xab\x72\x7c\x6f\x69\xed\x85\x6a\x07\x64\x9f\x55\xf3\x27\x17\xaa\xdd\x5b\xb4\xc2\x92\xbe\x29\x30\x54\x77\x5c\x7c\x34\x3c\x49\xa4\xa4\x2b\x66\x19\xa0\x29\x6a\x83\x13\x6f\x88\xdf\x83\x44\x6d\x10\x64\x4d\xfc\xfa\x09\x52\xa6\x50\x2c\x49\xf0\xb2\x32\x18\x2d\xfb\xff\x43\xce\x8e\xd9\xbf\xaf\x9c\x2d\xad\xfd\x8b\x9c\xfd\x22\x67\xdb\xc0\xfc\xab\xe5\x6c\xf7\x16\xad\xb0\xa4\x6f\xfa\xe7\x97\xb3\x7a\x57\x65\x47\x94\x1b\xb8\x32\xfd\x0e\xf7\x0a\x65\x9f\xa4\xbc\x80\x9d\x75\x1e\x90\x6d\xf1\xbe\x3e\xdb\xc3\x53\xa4\x05\x82\x8c\x48\x52\xd4\xda\xaa\xe2\x61\x9f\x3c\xe8\x61\x0e\xa3\xd6\xee\x41\x57\x16\xca\x0e\xfa\x62\x84\xba\x93\xbe\x98\xff\xcb\x17\x0d\x7e\xb0\xd7\x6a\x4d\x5a\xb5\xcf\xb8\xa9\xba\x5a\x5d\xd8\xe2\xf8\xcb\x20\x54\x5e\xd9\x4f\x41\xe4\x2d\x38\x4f\x90\xd4\xcf\xe6\xf5\x6d\xe7\x40\x7c\x2a\xdc\x0d\x61\xa1\x85\x25\x8b\x69\x64\x6f\xbd\xdd\xad\xd1\x94\xcd\x3c\xe0\xb2\x07\x75\x99\xf6\xd2\xcc\xcc\xaa\x9e\x01\x0c\x2b\xbf\xc3\x06\x09\xb3\x7b\xfb\xd4\xdc\x06\x75\xd4\x0a\x98\x30\x7b\x25\x97\xe8\x41\x8b\xeb\x3a\xa6\xf6\xe6\x8a\xdb\x3c\xbb\x7a\x9e\xb4\xc5\xaf\x1e\xcf\xa5\x92\x3c\x5f\x96\xaf\x64\xf9\xbb\xef\x3d\x7f\xdd\xa7\xa7\x85\xbc\xbd\x1e\xcc\xd9\x8b\x1f\xb4\x1a\x59\x66\x2a\x0b\x5c\xfd\xb2\xf0\x64\xa0\xb4\xc6\x9e\xab\xf8\xa1\x01\xd2\x57\xfc\x68\x5b\xb5\x92\x98\x2c\xad\x72\x53\xee\x72\xcd\x86\xc7\xf6\x72\xcd\xd1\x7c\xe9\x8d\x85\xe3\x5d\xbf\xde\xa0\x39\xce\xe9\x9b\xc9\x96\xfe\x47\xb3\
x75\xee\x2c\x2f\xcb\x3e\xaf\xcc\xab\xd9\x08\x0d\x12\xbc\x6b\xd9\xe6\x5d\x46\xe7\x9a\xe4\x4b\x38\x33\xd6\x4c\xbb\xf9\x1d\xde\xe6\x01\x9c\xbe\xad\xc3\x0c\x74\xe9\x44\xd0\x2e\x24\xed\xb3\xf7\x83\x5a\xb3\xc6\x94\x6b\x22\x0b\x7b\xb3\x12\x9d\xe8\xf9\x2c\xb5\x9e\x36\xed\x7c\xdd\x89\x20\xcc\x52\x9e\x09\x61\x5b\x9b\xa7\x91\x8b\x15\x0b\xda\xd5\xf9\x33\xdb\x0b\x52\x14\xfd\x7c\x1e\xb6\x73\x8b\xe5\x59\x64\xba\x6d\x4c\x81\x67\x81\x26\x8c\x6f\x67\xa6\xcc\xec\x19\xaf\x2f\xc0\xdc\x67\x31\x56\x60\xf8\x00\x64\xe2\xb8\x54\xaf\x5b\x1b\x25\x92\x6f\xca\x66\x89\x95\x78\x5c\x4f\xbe\xbe\x8f\x7b\x5a\x3c\x04\x41\xe6\x02\x89\x96\x2b\x74\x78\xb3\x36\x49\xe0\x72\x7c\x26\x61\x91\x29\xb3\xf5\xb7\xe8\x6b\xb6\xc5\xb6\x66\x4f\x10\xa6\x97\x62\xcd\x4b\xe6\x70\x58\xfe\x85\xd3\x5d\x41\x29\x01\x87\xb3\xf6\xb8\x6d\xc3\xc3\x17\x9e\xfe\xc2\xd3\xfb\xf2\x74\x39\x5f\xdf\x1c\xc2\x52\x81\x91\xab\x3b\x08\x4b\x72\xcb\xcd\x15\xd6\x35\x91\x37\x1e\x11\x37\x16\x11\x0f\xdf\x0d\x29\x0a\xaf\x45\xac\x6c\x3e\xc6\xdf\x52\x0f\x9f\x36\x60\x76\xf5\xd9\x57\xa3\xee\x05\xf5\x41\x5b\xee\x2a\x57\xb1\x21\xfe\x75\x21\x8b\xce\x3d\x17\xa4\xc7\x2e\x1f\x0f\x49\x92\xd1\x72\x89\xe6\x32\x62\xdb\x41\x78\x97\x58\x82\xc0\xf1\xa4\x04\xb6\xa5\x47\xad\xa2\x75\x4d\xe0\xd0\xa5\x4d\xeb\x58\x56\x92\x8b\xcb\xa8\x69\x3d\x95\x7b\x94\x3d\x4f\xa8\x54\x79\x15\x08\xe0\x16\xdb\xa6\x56\xb6\xfe\xb2\x17\x49\xed\xcf\xf0\x72\x0c\x11\x49\x92\x17\x76\xeb\x39\x6b\x68\x3e\x7a\xf3\x66\x64\x53\x9a\x9d\x43\x43\xa0\x2d\xb1\xc8\x2b\x13\xed\x07\x65\x90\x4b\x6e\xb6\x17\x50\x8f\x37\x19\xdf\x97\xc1\x1c\x6b\x37\x76\x01\x79\xc0\x99\xa8\xb0\x1a\x23\xce\x6c\x79\x8f\xc8\x14\x32\xca\xd3\xc9\x9f\x3d\x0b\x25\x79\xdb\x1f\xef\x4e\xa0\xc2\x1d\x27\x7d\xd3\x52\xae\xfe\x62\x0b\x73\xf7\xd7\xb9\x13\x61\xbc\xb0\x4c\x1b\x20\xab\x79\xb3\x47\x1b\xfa\xdd\xbb\x8a\xca\x29\x9a\x38\xee\x27\xd1\xf0\x0e\xd6\x1f\xa6\xe0\x3d\x8e\x5b\xca\x69\x80\xb5\xd2\x61\x81\xc8\xf4\x0c\xf4\xdc\x76\xd4\x4d\xec\x01\x1d\xe0\xc0\xec\xe6\xae\x1a\xb5\x60\xb6\x0e\x65\x41\x92\x0f\x2a\x2c\x53\x63\x07\x73\x6e\xf4\x2c\x11\x84\xdc\
xcd\x26\x3b\xd8\x02\xf6\x65\x0d\x3b\xad\x4b\xce\x93\xc3\xb3\xc9\x67\x45\xdf\x36\x97\x94\xf3\xcf\x14\x29\x6f\x45\x9f\x22\xb3\xb1\xb8\x8d\x5e\x3e\x3b\x57\x56\x64\x8a\x3e\x54\x6e\x32\x79\x9c\x68\x44\x92\x38\xc6\xd8\xdd\x39\xb1\x68\xb6\x15\x54\x48\xa6\xf8\x86\x28\xaa\xc5\xa0\x7d\xb9\xa4\x91\xa3\x5e\x92\xd7\xfb\xde\x6d\x69\x20\xe4\x31\x72\xd2\x03\xee\xb4\x9a\x64\x6c\x6d\xd1\x56\x2c\x3b\x74\x8b\xa4\x20\x47\x8d\x81\xda\x6e\x90\x14\x25\xd9\x3e\xe1\x95\xbb\x59\xbd\xf2\x7c\x90\x8f\x1c\x69\x8b\x0a\x25\xbe\xc6\x56\x41\xb8\xd6\x34\xd3\xca\xea\x8a\x9a\xf0\x26\x85\x5b\x60\x9c\x45\xee\xfd\x1d\x07\x7c\xb1\x85\x4c\x16\xe5\x28\x9a\x2a\xc8\x59\xb2\xe3\xa0\x6c\xd0\xac\x6e\xeb\x79\x16\x55\x8c\x6d\x69\x12\x39\x80\x29\x4a\xfa\xbb\x07\xed\x4a\xf0\x25\x92\xe7\x0e\xa5\xd2\x34\x0a\x4e\x2b\x5f\x4f\x3a\xdc\x05\x7d\x5d\xea\x7d\x98\x23\xba\xa5\x67\x83\x34\x2e\x40\xb4\x67\x35\xfe\x3d\xbc\xcd\xed\xe5\x21\x8e\xf7\x3e\x8f\x5b\x61\x1e\x6e\x82\x94\x11\xd3\x0e\x77\xaf\xea\x1c\x1f\x4a\x01\xb5\x4a\xa5\x0e\x8b\xcd\xba\x1f\x3b\x37\xd0\x0c\x2e\x7d\xf2\x39\x49\xfa\x1b\xba\x92\x4f\xf3\x1a\x1d\x37\xbe\xdd\x4d\x15\x6a\xd3\xf7\x5d\xc9\x36\x0f\x53\x4c\x0e\xe0\x3d\x49\x68\xec\x02\x58\x5a\xe6\x9e\x42\xbf\x7c\x3d\xae\x01\xf5\xb9\xcb\x15\x7e\x71\xda\x52\x6b\x72\x43\x28\xb3\xaf\x56\x31\x7b\x38\xad\x15\x7b\x69\x40\x2c\x85\x1e\x65\xa3\xd6\x4b\x51\x7f\x60\xa0\x27\x36\xb9\x98\x8c\xe6\xa7\xa6\xfe\x16\xe3\xac\x9f\xfb\xfe\x9b\x9a\xd6\xae\xaf\x57\x14\x37\xa9\x53\xc1\x3e\x4f\x65\x6a\xa5\x36\xb4\x6c\xb8\x7a\xc8\xa4\x5e\xdc\x43\xff\x31\x47\x55\xe5\xcb\x86\xdc\x5f\x65\x62\xf5\x80\xc4\xdb\x77\x0e\xc2\xc3\x58\xb8\x05\x4a\x63\x9f\x6f\xc8\x3d\xdd\x64\x9b\xf6\xd2\xa4\xee\x4a\x8c\xcf\x59\x23\x0b\x1e\x38\x4a\x95\x1e\xdb\xc2\xb8\x5a\xe5\xd3\xc6\xa5\x4d\x0b\x2f\x53\x04\x8f\x50\xca\x72\xb5\x7c\x3d\x4a\x03\xe8\x02\xfd\xab\x5f\x04\x96\xf4\x1e\xe3\x7c\x92\xa2\xe7\x83\xcb\x85\x7f\xe7\xd5\x89\x96\xd2\x1b\xf3\x02\x82\x5f\x41\xaf\xe9\x91\xd7\x47\xa2\x08\x99\x22\x2b\xcc\xcb\xe7\x4a\x54\x95\x0f\xbd\x52\x99\xee\x5a\xdd\xac\xa6\xc9\x28\x78\x66\x2a\x1b\
xd1\x25\x30\xd4\xeb\x22\x62\x5b\x2d\xd4\x60\x57\xa8\x37\xe5\xdc\x73\xc7\x5c\x33\xa1\x5b\x55\x03\xa2\x6d\x8f\xbf\x65\x24\xf1\xf1\xd4\x62\x2e\xf9\xfe\x28\x8a\xfd\xec\xf9\x4c\x84\x8d\xe9\xa3\x1c\xc0\x50\x41\x82\x44\x2a\x70\x91\x1b\x87\xe5\xd2\xec\xdc\x5c\xaf\x59\x7e\xf3\x65\x6e\x6e\x98\x85\x68\xb4\x32\x9c\xa1\xf5\x03\x61\x70\x32\x80\x73\x24\x82\x59\x42\xd8\xb7\x8b\x3e\x14\x80\x77\x8a\x3e\x9e\x24\x94\xad\xfa\x3c\x53\x7d\xcb\x2d\xb2\xaf\x78\xdf\xad\xb0\x5f\x6b\xfe\x74\x43\xee\x6f\xa4\x06\xdc\xb8\x8f\xb6\x57\xfd\x99\xae\xe3\x53\x44\x92\xc8\x56\x2f\x6e\x3f\x3f\xb5\xdd\x55\xdb\x75\x5b\xad\x59\x79\x38\x1f\xec\x93\x1f\xb0\x16\x92\x27\x99\xf2\x85\x9b\xf8\xb2\x2a\x74\xa3\xae\x81\xed\x5b\x14\x79\x9a\x6a\xfe\x98\xde\x86\xc7\xa8\x65\xb2\xab\x15\x6c\x21\x53\x09\x73\xc3\xce\x73\xb3\x7b\xda\xdd\x44\xf3\x62\xcc\x79\xd1\xb7\xc2\xed\x0e\x90\x7b\xbd\xa7\x36\x52\xd8\xf7\xa4\x77\xd3\xdc\xed\xe0\xd2\x0c\x5a\x46\xcb\x5b\xbe\x7c\x75\x72\x02\x7f\x0d\xcf\xb3\x24\xc4\x6a\x75\x1f\x5b\x00\x1b\x97\xce\xf7\x27\x7f\x09\xa9\x3a\x30\xb2\xa7\xa3\xb0\x3b\xbc\xfa\xf6\xa4\x44\x98\x3b\x9f\x3a\xfe\xfc\xfb\x93\x8e\x49\xbe\xfa\xf6\xe4\x05\xfc\x27\xbc\xfa\xfa\xa4\x42\x57\x5f\xc7\xd9\x7a\x80\x09\x08\xd4\xba\x39\x46\x51\x91\x6c\x2d\x59\x14\x4e\xa0\x05\x0e\xa0\x86\x2c\x7f\xc0\x76\x78\x13\x14\x8b\xf5\x1b\x34\x4e\xdd\xc8\x86\x76\x58\xd6\x70\x61\xd3\x46\x42\xd2\x0b\xac\x04\x23\x90\x72\x49\x8d\xa9\xe0\xd6\x10\xf2\xd3\xef\x25\x52\xc0\xab\x91\x3f\x00\x4d\x97\x76\xa4\x43\x10\x55\xe8\xb8\xaa\x5e\xf3\x17\x5d\x4f\xf4\x16\x7c\x75\x72\xf2\x97\xa6\xe6\x04\x63\xc3\x47\x49\x26\xe9\x2d\xd6\x0b\xa1\xba\x77\xb7\xe6\xdf\x9f\xcc\x8d\xa2\xfb\xfe\xe4\x2f\x47\xa2\xb0\xaa\x75\x1e\x64\x45\x95\xe0\x3c\xd8\x96\x6a\x87\xd5\x34\xff\xf7\x35\xa9\xb2\x56\x98\xd0\x65\x35\x0d\x4b\x69\x52\x54\x5a\x9f\x66\x8c\x26\x2c\xe5\xc1\x01\x6d\x4a\xa0\x92\x5b\x64\xc9\x93\x84\xdf\x99\x87\x0d\x7c\x39\x20\x97\xf6\x40\x14\x95\xda\x84\xd3\x67\x81\x59\xe9\xa0\xfc\xec\x99\x84\x0f\xd6\x8f\xda\xa6\xc2\xe5\x4b\xf3\x10\x98\xd6\xde\xb9\x9a\xb6\x3d\x9a\x07\x14\x2d\x82\xa7\
xd7\x93\xc9\x78\xf2\xd3\xbc\x90\xef\x5e\x5a\x7d\x28\x3f\x2c\xb6\xcb\x62\x28\x65\x35\xe6\xe3\xca\x3e\x65\xe6\xf0\xd4\x1c\xb9\x76\xe5\xc2\xbb\x06\x55\x6d\xad\x95\xa7\xcd\x9c\xff\xd8\xdd\xb0\x6f\x80\x9c\xbb\x37\xa6\xe6\x40\x0a\xbb\xaa\xa8\xa5\x6f\x57\xc5\x78\x15\x26\x2f\xd5\x46\x69\xee\xb5\x5c\x7d\x55\x4b\xf4\xdb\xeb\x9e\x8a\x5b\xf7\x3f\x29\x39\xb6\x73\x6c\xfa\xf7\x2c\x72\xa6\x68\xf2\x81\x67\x92\xba\xfd\xbd\xbf\xbd\xdd\x80\xd9\x6a\x7f\x1f\x60\x6f\x37\x80\x86\xed\xef\x43\xec\xed\x26\xa9\xea\x36\x6d\x61\x85\xef\x67\x75\x37\xa3\x03\x3b\xac\xf0\x9d\x56\x77\x70\x8e\x3b\xac\xf0\x6e\xab\xbb\x49\xf1\xc2\x0a\x2f\x83\x7a\x04\x5b\xbc\x24\xd1\x5e\x1c\x94\x71\xf3\xc5\xda\xfe\x62\x6d\x7f\xb1\xb6\xab\x3f\x5f\xac\xed\x2f\xd6\xf6\xa3\x5b\xdb\x94\x4d\x91\xc4\xa1\x57\x51\x3f\x51\x49\xfb\x77\xc5\x08\x5d\x86\xf3\x3b\xca\x6a\x36\x73\xfe\x1a\x2a\x37\xcf\x9b\xda\x70\xa3\xb9\x0f\x62\x1e\xde\x48\xb6\xde\x25\xd9\xb4\x2e\xfd\x7e\x5f\x60\xc4\x37\x9a\x12\x41\x93\x67\xbf\xe7\x38\x1d\x96\xe8\x86\x24\xcd\x12\xb4\x70\x50\xb4\xe2\x5d\x19\xcc\x03\x4f\x24\x65\x50\xbb\x62\x12\xae\x71\xa9\x3e\xed\x02\x41\x91\x8f\xc8\x4c\x06\x58\x71\x9c\x28\x02\x68\x8e\xc5\x1a\xa8\xf5\x9e\xc2\xe9\xe8\x6a\x36\x9c\xce\xe6\xb6\xf6\xa7\x7d\x30\xa3\x59\x0d\xd0\x18\x30\xd3\xd1\xe5\xf9\xf0\x6c\x34\x2f\xde\x74\x6f\x1a\x28\xf9\xab\x2a\xb5\x97\x69\xf3\x77\x55\x5c\xa0\x30\x7f\x28\x25\xb7\x24\x5b\xe6\x49\x8a\x29\x5a\x21\x6c\x91\x27\x1a\xaf\x67\xf9\x4a\xbd\xf6\x89\x03\x8b\x20\x6d\x6b\x37\xf7\xd3\x5b\x7e\x87\xb7\x58\x58\xc2\x1e\x62\xe0\xf1\x59\xcf\x30\x1e\x60\xd7\x4c\x5d\xf2\x37\x32\x9e\xad\xd6\xd5\x19\x61\x7e\xfb\x85\x2a\x97\x19\xe8\x3f\x12\x6b\x68\xc7\x54\x8a\x2c\x0d\x26\x37\xb9\x07\x4b\xf6\x0a\x6f\x38\x12\x05\xfe\x6e\x50\x18\xf8\xfb\x9b\xe9\xe8\xea\xed\xee\x30\xc9\x86\x4b\xf5\x3a\x9f\xe3\x50\x9f\x3a\x31\x7e\xf0\x36\x6a\x07\xfa\xc0\x4d\xd5\x0e\x78\xe7\x16\xe3\x52\x95\xa8\x91\x6f\x35\x57\xd5\x93\x58\x58\xcd\xad\xd7\x2a\xbb\x1a\x5b\x31\xdf\x7a\x26\x10\xa6\x21\x2d\xb9\x58\x50\x9b\x8b\xea\x1e\x9f\xea\x69\xae\x37\xa4\x69\x46\xef\x6c\x62\x04\
xbf\xcb\x5f\x23\x2d\xbf\x86\xa3\x0f\x96\xe5\x5b\x80\xee\xe6\x44\x65\x9b\x9b\xde\x2d\x6c\x56\xf5\x6b\xf8\xac\x4f\xff\x5e\x65\x09\x70\x9a\x90\xc8\xbd\x98\x93\x0b\x86\x8e\xa9\x26\x89\xb6\x15\x6c\x01\x09\xff\xc0\x96\x37\x4f\x3b\xf6\x5f\x03\xa2\xdf\x8f\xce\x95\xe2\xa8\xa3\x57\x8d\x26\xd9\xb4\xb6\x99\xec\xa1\x6a\x53\xa5\x69\xc8\xa1\x62\x88\xda\xdc\xe4\x7a\xbf\xe6\xef\x2a\x36\xf7\x33\x68\x51\x93\x24\xff\xe2\xbd\x69\x89\x61\x0a\x3d\xbe\x33\x99\x0c\xc7\xef\xc8\x69\x1d\xd4\xc3\xf6\x61\x03\xdc\xce\xa0\xfb\xcf\x25\xe9\x5d\x54\x5d\xf1\x37\x52\xdd\x4a\xcb\x46\xea\x15\xe6\x42\xfa\xc6\x49\xca\x30\xa2\x03\x0f\x55\x5c\x5d\xff\x78\x35\x1b\xcf\xae\x67\xa3\x1e\x4c\x47\x67\xd3\xd1\x70\xb6\xe7\x23\x14\x45\xcf\x00\xd9\x2c\xa0\x27\x0d\x0a\x1c\x4d\x95\x87\xa7\x41\xec\x93\xf0\x60\xd2\x78\xdc\x8d\xac\xba\xa7\xb2\x4d\x84\x75\x24\x19\x48\x5e\x68\xd1\xf0\x85\xbd\xca\xa3\x25\x78\x8f\x51\x66\x1e\x63\x73\x72\x28\xe0\xa2\xc9\x0b\xab\x2f\x44\xd5\x3a\xb1\xe7\xdf\x46\x92\x9f\x35\x5b\x2e\x2e\x2f\x2f\xa6\xb3\xeb\xc9\xf8\x6a\x36\x3e\x6b\x9f\x27\xe3\x9e\xef\xcc\xf3\xf2\x8d\x99\xc5\x26\x1f\xbe\xb4\xf7\xf3\x6b\xaf\xbe\x22\x5c\x53\xae\x48\x48\xcd\x03\x64\x4b\x5b\xdd\x3d\x5f\x5b\xf5\xa6\x9b\x7d\xa9\xc5\x4e\xb6\x59\x3e\x39\xe4\x61\x49\x64\x23\x4c\x1a\xe6\xd3\xca\xda\x1b\x5f\x9b\x79\x0f\x1e\x71\x07\xa7\x1b\xbe\x6f\x26\xdd\xb6\x1c\xa6\x82\x55\xb3\x65\xe9\x2a\xf8\x5e\x19\x62\xa5\x8a\x78\x25\x61\x30\x22\xd1\x3a\xcf\xe2\x2c\x3d\xdb\xb7\xd8\x6a\xe3\x78\x5e\xaf\x12\x5e\xc5\xad\x7d\x9f\x7e\xae\xd7\x33\x1f\xc0\xe8\x16\xc5\x36\x87\xa5\x79\x9f\xa4\x29\x12\xa1\x45\xbe\xd1\x27\x5c\xcf\x3f\x45\xd1\x91\x40\x05\xe5\x9b\x10\xfc\x16\x85\x30\x35\x24\x8d\x19\xcc\xd3\x7e\x82\xb7\x98\x04\x66\x65\x3b\x0c\x40\x9f\xb6\x9a\x7e\x40\xeb\x03\x74\x15\x2f\x13\x7b\xb5\x7f\x4d\xd3\xe2\x2c\x6a\x5f\x29\x36\x30\x5a\x5d\xfc\x47\x7a\x06\xcb\x03\xde\xb8\xf1\x6e\x3c\xcf\xdc\x10\x16\xe7\x99\x4d\x9e\xb0\x37\x85\xaf\xf0\x66\xc9\xc5\x0d\xb9\x71\xb0\x8b\x1c\x28\x03\xbb\xc6\xcf\xa3\x7b\x12\x29\xf7\xb0\x46\x91\x68\x6b\xce\x79\x09\x92\x5b\xf7\x56\x59\x39\x39\
xc5\x22\x39\x63\x12\x4d\x2d\x5d\x92\x0b\x81\x0a\x5c\xbf\x67\xf3\xeb\x2c\x36\xc9\xd4\xfa\x79\x2a\x32\xc5\x9e\xd4\x9d\x13\xdc\x9e\x92\xdd\x65\x54\x12\x07\xe8\xf1\x21\x22\x8c\x88\xad\x93\x0d\x9f\x1a\xef\xfe\x01\xbf\x1b\x72\x63\xc7\xb9\xb1\x9d\x5e\xec\x5d\xf9\xbc\xce\x64\x8d\x7e\x7b\x55\x40\xff\x7c\x0a\x3c\x07\xe4\x4d\x97\x5f\xba\xeb\x7d\x80\xf2\x64\xf6\x2b\xf5\xdc\x59\xde\x1d\x3e\xcd\x8b\x01\x21\x37\xd8\x8e\x1a\xad\x7b\xbc\x1b\x10\x80\x5a\x3d\xa7\x77\x5f\xee\xb6\x0f\x9a\xd7\xf7\x5e\xd8\x23\x67\xa7\x50\x12\xc6\x2e\xb9\xff\x88\xe7\x07\x3c\x0d\x1f\xfa\x08\x41\x00\x68\xfb\xb3\x04\x7b\x3e\x45\x10\xa2\x53\xe5\x71\x82\x1f\xec\xc4\xb9\x44\xf3\x82\x90\xec\x85\xdc\x2b\x44\x78\xb3\x22\xb6\x48\x0e\x92\xbf\x2e\xf2\x3a\x90\xdc\xc2\xc2\x6d\xe9\xe7\x70\xcc\x43\x06\x8e\xd4\x2d\xc9\xe8\x8e\xbb\xfe\x80\xba\xea\x93\xd2\xd3\x01\xf9\xf5\x8e\x6b\xfb\xca\x0b\xd9\x70\xb6\x32\x32\x3e\xb7\x09\x5b\xde\xf0\x05\x90\x11\x4f\x71\x57\x39\x8e\xba\x11\x1f\x4c\xb9\x2f\xaf\x72\xd7\xcb\x66\x1d\x59\xf6\xb5\x96\xbb\xa5\x61\x07\x94\xea\x31\xab\x6e\x8e\xb9\x37\x1d\x83\x79\x0e\xbc\x94\x34\x1a\xa2\xbf\xf7\xec\x35\x0d\x19\xbb\xd5\x97\xd4\xd4\x98\x09\x80\xde\x03\x68\x2e\xc2\xbc\xe6\xf6\x01\xaf\x53\x1b\xde\xc2\xfb\x54\xa0\x74\x65\x9c\xab\x01\x8d\x90\x2b\x3e\x77\xe9\x65\x9b\x72\xba\x64\x41\xc5\x81\x0b\xa1\x69\x5b\x3c\x74\xb7\x60\xd0\x2d\xf7\x7c\x75\x64\xad\x71\x1b\x93\xab\xc6\xd9\xaa\x7b\xd9\xc7\xad\x42\x22\xea\x79\x69\x7a\x0e\x84\x0d\x6d\x85\x1f\x68\x2a\x5a\xbf\x98\xc3\x38\x14\x52\xdb\x23\xc2\x95\x47\xb3\x34\x92\x8d\x55\xd5\xab\xc8\x9a\x00\x54\x43\x21\xef\x17\x61\xdb\x90\x51\x65\x64\xb6\x97\x73\xc6\x28\xb7\xe7\xa2\xdc\xc4\x0b\x05\x3f\x8c\x19\xfc\xe1\xca\x99\x40\x40\xa0\x62\x6a\xfd\x51\x96\x56\x8b\x92\xab\x1b\x88\x81\xe8\x5f\xf7\x85\xd9\xee\x78\xf9\xee\xd8\xd7\xee\xe8\xd7\x61\x51\xf3\x3d\xe2\xe6\x8f\x19\x39\x7f\x50\xec\xbc\x05\x62\x7b\x3c\x7b\x9f\xe8\x79\x0b\xd0\x1c\xc6\xd1\xf1\xf3\x16\xc0\x7f\x7d\x40\x04\xbd\x05\xa4\xf9\x7c\x7c\x0c\xbd\x75\xa2\x0f\x89\xa2\xb7\x00\x2d\x49\x9e\xd0\xb5\xc9\xd6\x48\xfa\xa3\x6c\x94\xb6\
x78\xfa\x23\x45\xd4\x0f\x88\xa9\xef\x1d\x55\xef\x8c\xab\x3f\x0a\xd2\xda\xa3\xeb\x0f\x8c\xaf\xb7\xe0\xcc\x44\xdd\x1f\x1c\x61\xdf\x89\xd0\xfa\x03\xb1\xfb\x3d\x50\x51\x7b\x0c\xf6\xa1\xef\x9c\x90\x03\x1f\x8a\xad\xfa\x3e\x76\xbd\x71\x52\xbc\xf2\x6a\x1d\x94\xbf\x1f\xfd\xc6\xc9\xff\x05\x00\x00\xff\xff\xb6\xda\xe7\x52\xb8\xa1\x00\x00"), }, "/compute/beta/instance_template.yaml": &vfsgen۰CompressedFileInfo{ name: "instance_template.yaml", @@ -342,9 +342,9 @@ var Assets = func() http.FileSystem { "/dataproc/workflow_template.yaml": &vfsgen۰CompressedFileInfo{ name: "workflow_template.yaml", modTime: time.Time{}, - uncompressedSize: 126603, + uncompressedSize: 126599, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x7b\x6f\x1b\xb7\xb2\xf8\xff\xfd\x14\x03\xe7\x02\x7e\xc0\x92\x92\xb4\xa7\xb8\xd7\x07\xfd\x01\x8e\xed\x34\x6a\x1c\xdb\xc7\x72\x5a\x9c\x63\x04\x36\xb5\x4b\x49\xac\x77\xc9\x3d\x24\xd7\x8e\x1a\xe4\xbb\xff\xc0\xd7\xbe\xb4\x2f\xc9\x52\xe2\xa4\xbb\x40\x1b\x59\x5a\x0e\x87\xe4\xcc\x70\x66\x38\x33\x7c\x06\x47\x2c\x9a\x73\x32\x9d\x49\x78\xf9\xfc\xe5\x4b\xf8\x95\xb1\x69\x80\xe1\xf4\xf4\xa8\x0f\x87\x41\x00\x97\xea\x27\x01\x97\x58\x60\x7e\x8f\xfd\xfe\x0f\xcf\xe0\x87\x67\x70\x4a\x3c\x4c\x05\xf6\x21\xa6\x3e\xe6\x20\x67\x18\x0e\x23\xe4\xcd\xb0\xfb\x65\x1f\x7e\xc7\x5c\x10\x46\xe1\x65\xff\x39\xec\xa8\x17\xb6\xec\x4f\x5b\xbb\xff\xfc\xe1\x19\xcc\x59\x0c\x21\x9a\x03\x65\x12\x62\x81\x41\xce\x88\x80\x09\x09\x30\xe0\x8f\x1e\x8e\x24\x10\x0a\x1e\x0b\xa3\x80\x20\xea\x61\x78\x20\x72\xa6\xbb\xb1\x40\x14\x1e\xff\xb6\x20\xd8\x58\x22\x42\x01\x81\xc7\xa2\x39\xb0\x49\xf6\x3d\x40\xd2\x60\xac\x9e\x99\x94\xd1\xc1\x60\xf0\xf0\xf0\xd0\x47\x1a\xdb\x3e\xe3\xd3\x41\x60\xde\x14\x83\xd3\xe1\xd1\xc9\xd9\xe8\xa4\xf7\xb2\xff\xdc\xb4\x79\x4f\x03\x2c\x04\x70\xfc\xdf\x98\x70\xec\xc3\x78\x0e\x28\x8a\x02\xe2\xa1\x71\x80\x21\x40\x0f\xc0\x38\xa0\x29\xc7\xd8\x07\xc9\x14\xc6\x0f\x9c\x48\x42\xa7\xfb\x20\xd8\x44\x3e\x20\x8e\x7f\x78\x06\x3e\x11\x92\x93\x71\x2c\x73\xd3\xe5\xf0\x23\x22\xf7\x02\xa3\
x80\x28\x6c\x1d\x8e\x60\x38\xda\x82\x57\x87\xa3\xe1\x68\xff\x87\x67\xf0\xc7\xf0\xea\xcd\xf9\xfb\x2b\xf8\xe3\xf0\xf2\xf2\xf0\xec\x6a\x78\x32\x82\xf3\x4b\x38\x3a\x3f\x3b\x1e\x5e\x0d\xcf\xcf\x46\x70\xfe\x1a\x0e\xcf\xfe\x0d\x6f\x87\x67\xc7\xfb\x80\x89\x9c\x61\x0e\xf8\x63\xc4\x15\xfe\x8c\x03\x51\x13\x69\x56\x6f\x84\x71\x0e\x81\x09\x33\x08\x89\x08\x7b\x64\x42\x3c\x08\x10\x9d\xc6\x68\x8a\x61\xca\xee\x31\xa7\x84\x4e\x21\xc2\x3c\x24\x42\x2d\xa7\x00\x44\xfd\x1f\x9e\x41\x40\x42\x22\x91\xd4\xdf\x2c\x0c\xaa\xff\x03\xa1\x13\x76\xf0\x03\x80\x24\x32\xc0\x07\x70\x8c\x24\x8a\x38\xf3\x06\x7f\x30\x7e\x37\x09\xd8\xc3\x15\x0e\xa3\x00\x49\xfc\x03\x80\x8f\x85\xc7\x49\xa4\x40\x1d\xc0\xd5\x0c\x27\x2f\x43\xf1\x65\xe0\x58\xb0\x98\x7b\xaa\xd5\xc7\x9e\xef\x05\x3d\x21\x79\xec\xc9\x1e\x45\x21\x3e\x58\x78\x3d\x79\x6b\x86\x44\x8f\xa0\xf0\x00\x26\x28\x10\xf8\x87\x08\xc9\x99\x50\xc8\x4d\xb1\x54\xff\x94\xa0\x30\x89\xa9\xa7\xfe\x52\x94\xa9\x97\x76\x8a\x15\x41\x4e\x18\x0f\xf5\x98\x01\x8d\x59\x2c\x01\x95\xf5\x09\x10\x21\x8e\x42\x2c\x31\x17\x06\x7c\x0f\x2a\x11\x54\x8f\x23\xaf\x03\x90\x3c\x76\x5f\xe6\x50\x3a\x84\x49\x1c\x04\x40\xa8\x90\x9a\x19\xd8\xa4\xbc\x6b\x45\x9c\xf3\xb6\x43\xd2\x2f\x7f\x03\x83\xf2\x71\x80\x25\x6e\x3b\x2a\xf3\xf6\x13\x1d\xc4\x61\x10\x2c\x3b\x8e\x20\x58\x6e\x24\x11\x67\x7f\x62\x4f\xd6\x0d\x40\x78\x33\x1c\xa2\x03\xfb\x17\x80\x9c\x47\xf8\x00\x94\x08\xa2\xd3\x1c\xac\x80\x79\x9a\x30\x1e\x01\x2c\x20\xa2\x35\x8f\xa9\x77\x4b\xe8\x31\x44\x74\xfe\x0d\xcf\x81\xda\xc3\x18\xc5\x54\x6a\x24\xcd\xab\x16\xdf\xe2\xa0\x1c\x04\x2b\x34\x2b\x88\xd2\xc8\x34\xe2\x27\xe3\x14\x83\x4f\x9f\xec\xc7\xcf\x9f\x07\x0e\x61\xf5\xad\xfb\xfc\xf9\xf3\xe0\xa1\x00\x4c\xfd\xac\x46\xf8\xf9\x73\x0e\x6c\x84\x38\xa6\xb2\xe7\x31\xaa\xb6\x54\xcc\x8b\x93\x69\xde\x0a\xd0\x18\x07\xe2\x00\xcc\xbf\xb9\x9f\x94\xac\xf5\x38\x56\xa3\xc9\x4e\x51\xa9\x20\xce\xfe\xc4\x31\xf2\x7b\x92\x84\x98\xc5\xf2\x00\x9e\xe7\x7e\xd3\x92\xaa\xea\x47\xc3\x28\x8b\xbf\x9a\x45\x60\xe3\x32\x4a\xb0\x7f\x9b\x35\x4e\xfe\x88\x02\xe4\xe1\x10\x53\x99\x7c\xf3\x27\x1b\x8b\
xf4\xe7\xdc\x44\xf4\x8a\x94\x11\x71\x16\x61\x2e\x09\x16\x29\x21\x98\x89\xb8\x22\x21\x4e\xbf\x2b\xa1\x35\xf3\x18\xba\x3f\x00\x1f\xd9\xe1\x64\x7e\x33\x43\x9d\x32\xbb\xd3\x1d\x25\x70\x33\xef\xa8\x19\x3c\xa7\xc1\x3c\x37\xef\x0b\x9c\x77\x1e\xcb\x28\x96\xc0\x68\x30\xef\x6b\x36\x54\x3d\x81\x74\x3b\xec\x03\x12\x16\x6b\xbf\x9f\xeb\xff\x2e\x1e\x63\x4e\xb1\xc4\xa2\x47\xc2\x30\x96\x4a\x03\x2a\xf4\xe4\xa3\xe9\x95\x5d\x85\xe6\xd1\x16\x46\x74\x9c\xb4\xad\x44\x5c\xff\x8b\x82\x3e\xd8\x17\xc1\x8f\xb9\x91\x13\x4e\x83\x39\x3e\xfc\x55\x09\x60\xb5\x6a\xfb\x4e\xf9\xc1\x3e\x10\x9a\x81\x09\x20\xb0\xc7\xa8\x2f\x60\x47\x60\x0c\xd7\xbf\x8d\xce\xcf\x80\x63\xf5\x2e\xa6\x46\xa1\x51\x30\x1c\xec\x0f\x3b\x4a\x67\x14\x07\x83\x81\x8f\xef\x71\xa0\x96\x58\xf4\xa7\x5a\x45\xee\x7b\x2c\x1c\x44\x9c\x49\xe6\xb1\xa0\x37\x8e\x27\x13\xcc\xc5\xc0\x67\x9e\x30\xdf\xfe\xf8\xec\x4f\xc1\xe8\xee\x6e\x3f\xd7\xbb\x9b\xf2\xdc\x00\xc2\x58\x48\x18\x63\x98\x70\x16\xc2\x8b\xe7\x10\x12\x1a\x4b\x2c\x60\x67\xeb\xe7\xe7\xcf\xc5\xd6\xae\x92\x8e\x2f\x7f\x82\x19\x8b\xb9\xfa\xf2\x7f\x7f\xfe\x49\x7f\x5d\x0e\x99\xc3\x18\x4f\x09\x15\xf0\x30\xc3\x54\xcf\xcb\x84\x70\x21\xd5\xb4\x28\x65\x53\xc4\xe3\x90\x48\xb5\xbe\x30\x34\x9a\xb2\x13\x0d\xea\x57\x1e\x53\x9a\x5f\x28\x00\x24\xf5\x6b\x98\xfa\x4e\xb7\x76\x03\x88\x30\x27\xcc\xdf\x07\x25\xa1\x39\x0e\x11\xd1\xca\xa2\x9a\x7f\x40\x1c\x83\xa7\xb6\xc4\x20\xc0\xfe\xbe\x6a\x95\x83\x99\xed\x13\x53\x1f\x6b\x20\x3e\x90\x02\x46\x8a\x1c\x2d\x4a\x5a\x33\x86\xeb\x10\x51\x34\xc5\x3e\x78\x41\x2c\x24\xe6\x1f\x76\x06\xbe\xd3\x2d\xf5\xd4\x7b\x8c\x2a\xeb\x41\x24\x02\x4f\x0c\x62\x41\xe8\xb4\x97\xfc\xfd\xcc\x63\x74\x42\xa6\xb1\x22\xc8\x1b\xc6\x6f\x04\x0e\xb0\xa7\x94\xf6\x1b\x74\x63\xa1\xee\xee\xe7\x90\x55\x28\xd9\x5f\xb4\xba\xae\x25\xce\xf2\xfc\xa1\xe6\x65\x91\x33\x10\xe7\x68\x5e\xcd\x18\xbf\xa5\x22\x68\x81\x25\x2e\xad\x3c\x33\x8c\x7c\x4c\x38\xf6\x94\x09\x71\xe8\xcd\xbd\x80\x78\xf0\x2b\x47\xd1\x4c\x2d\x99\x82\xa1\x48\xc8\x2c\xfd\xd2\x78\x27\xca\x36\xa6\x7e\x0f\x87\x91\x5c\x14\x31\x76\x5b\x20\x42\xf6\xcc\xa8\xd4\xc7\
xcc\xef\x44\xe2\x30\x37\xf6\x52\xf9\x5c\x98\x00\xf3\x46\x71\x17\x2c\x4c\xc8\xa2\x54\x37\x4f\x0f\x84\xc4\xd1\xd0\xcf\x7d\x59\x26\xa3\xcd\x33\x43\x3e\x63\xd1\x6f\x6c\x5c\xfc\xa1\x06\xd1\x92\xd5\x7a\xe3\xc0\x54\xbf\x58\x3d\xaa\xea\xc6\x15\x82\xf0\x37\xc3\xd1\xc8\x76\xab\x08\xac\x5f\xd2\x71\xab\x25\x6e\x9a\x20\xf5\x20\xee\xcd\xc8\x3d\x7e\xcf\x49\xe9\xcf\x55\x14\x5d\x39\x5b\x87\x29\xbc\xd2\xd7\x73\xc3\xde\x4e\xc7\xfd\xe6\xe8\xf5\x08\xde\x5f\x0e\x85\x56\xb7\x0d\x10\x4d\xe0\x63\x0c\xf8\xa3\xe4\x48\xb1\x41\x29\x44\x50\x36\xba\x93\x30\x4a\xa8\xf8\x9a\x69\x18\xd7\x6e\x03\x3b\x8d\x3e\x27\xf7\x98\x6b\x43\x17\x24\x12\x77\xa2\x0f\xa3\x38\x8a\x18\xaf\x86\xaa\xfd\x16\x6a\xf4\xe2\x00\xfa\x7f\x22\xbe\x0f\x7d\xe9\xfe\xdf\x9f\xfe\xa5\x3e\xa8\xff\x33\x0e\xfd\xbf\x48\xd4\xdf\xae\x98\x9d\x25\x56\x0a\xda\xb0\x65\xf1\xd5\x6a\x06\xcd\x4c\xd0\x22\xab\xa6\x4f\xc5\x66\xbe\xd8\x53\x42\xe8\x95\xef\x22\x3e\x5d\x17\x15\x4d\x5b\x90\x4f\x46\x7d\x98\x61\xd5\x79\xac\x34\x3d\x4d\x35\x11\x12\xfa\x5f\x45\x17\x66\xed\xfb\x70\xcc\x2a\x86\x47\x99\xb2\x51\xbc\x20\xf6\x33\x50\xf6\x41\xc4\xde\x0c\x90\x80\xdb\x5e\x40\xc6\x7f\x22\x2e\x6e\xd5\x62\xdf\xf6\x8e\x27\x8c\xfd\x32\x46\xfc\x56\x6d\x81\xa8\x7c\xc6\x41\x6d\x94\x8a\x76\x05\x96\x0a\x86\xda\xa8\x53\x46\xdc\x07\x41\x94\x65\x89\xc0\x63\x41\x40\xb4\x4b\x4d\x3b\xbd\x3c\x2f\xe6\xf5\x40\x63\x81\x15\x15\x2b\x7c\x19\x57\x74\xae\x41\xeb\x5d\x40\xfb\x72\x16\x85\x05\xfc\xed\xc8\x50\x71\xee\xda\x04\xda\x6b\x0b\x6c\x19\x72\xd4\xc2\x6c\xc7\x8a\x9e\x23\x16\x46\x48\x92\x71\x80\x35\x2c\x31\x17\x12\x87\xbb\x5a\xd6\x55\x8c\x93\x4d\xf4\x10\x9c\xf8\xf3\x58\x44\x8c\x35\xdd\x52\xca\x55\x80\x55\xb2\x2f\xeb\x96\xb4\x72\xf0\xbd\xc0\x93\x38\xd0\x0a\x37\x45\xe4\x1e\x07\x73\x6d\x84\x07\x01\x0e\xec\x2b\x1d\x49\xc1\x9f\x88\xbf\x5e\x27\x55\xfd\x96\xc2\x5b\x86\xb0\x7e\x43\xdc\xec\x4b\x7a\xa7\x94\x0c\x90\x9f\x10\xc6\xd1\xe9\xe1\x68\x74\x71\x78\xf5\xa6\x86\xac\xd4\x7b\x39\x5a\xc9\x6c\x88\xdd\x2a\x43\xc0\xa6\x53\x42\xa7\x47\xda\x98\xa8\x5b\xe7\x0a\xcd\x11\x16\x17\xfa\x34\x0b\xb2\xbe\x41\x0b\x2d\
xb2\x19\x5a\xcd\x06\xc9\x63\xaa\x1d\x03\x01\x9b\x82\xb1\x97\x34\xd3\xab\xfd\x03\x7f\xc4\x5e\x2c\xd7\xb7\x7d\xd4\xeb\x9c\x1a\x4f\x4d\x7e\xa7\x6c\x7a\xaa\x6c\xf0\xca\xd7\x5a\x4c\xb8\x7a\x90\xef\x13\x33\xd4\x8b\xc6\x9e\xa1\x1d\xcd\x2c\xba\x33\xf2\x18\x57\x36\xcb\x6b\xb8\x6a\xe2\x23\xcc\x7b\x11\xf2\xee\xd0\xd4\x4c\x7e\xa0\x01\x24\x2e\x0e\xa7\x9f\x5c\xcd\x2a\x84\x81\x79\x94\x82\xe0\xb4\x94\x2d\xce\x98\xdc\x02\x07\x54\x21\xa8\xc4\x80\xb3\x82\x31\xa8\xdf\x15\xb1\x60\x5e\xbe\xa2\xe6\x39\xf9\x88\xc2\x28\x50\x0a\xee\xf6\xb6\xc7\x42\xeb\x04\x81\x5f\xe0\xf5\xe1\xd5\xe1\xe9\xf6\xf6\x3e\x6c\x6f\x2b\x48\xf0\x0b\x0c\xcf\x5e\x9f\x9b\x2f\x18\x9f\xda\x93\xb6\x1a\xc8\xbf\xc0\xf1\xc9\xab\xf7\xbf\x6e\x6f\x97\xab\xc5\xb0\x3c\x51\x85\x88\xd0\xa3\x00\x89\x5a\xf9\x5b\xb3\xa2\x85\xd5\x7c\xe7\xc0\x35\x73\x91\x5a\x42\x3d\xc3\x56\x80\x9a\xf5\xda\x16\x1a\x25\xf0\x14\x10\xc3\x60\x7f\x5a\xe9\x5c\xa5\xbc\x19\x07\xac\xda\xbc\x8d\xf7\x41\xe9\xa8\xce\x4d\x64\x6d\x18\x1f\x4f\x50\x1c\xc8\x54\x98\x2b\x95\xd3\x9e\xe2\xd5\xd9\x3f\xb7\x7f\x22\x7e\xa3\xfa\xbe\x89\x39\x11\xb7\x6b\x61\x63\x35\xbc\x74\x9f\x5a\xdf\xb4\xa7\x30\x5b\x58\x88\x6a\x5e\x9d\x6d\xe8\x16\xc0\xcd\x73\x71\x46\x15\xc2\x55\x73\x6f\x56\x29\x4b\xf0\x53\x71\x30\x18\x4c\x18\xeb\x8d\x63\xef\x0e\xcb\x01\xa2\x28\x98\x4b\xe2\x89\xde\x98\x50\xc4\x09\x16\x03\x6b\x78\xf6\x62\xad\x22\xf5\x42\x2c\xb9\xfa\x3d\xe4\xca\x1e\xac\x24\xed\xed\xed\x99\x3f\x11\x07\x03\x19\x46\x03\x89\x85\xec\x09\xd3\xe9\xc0\x8b\x85\x64\x61\xef\x81\x71\xdf\x63\x31\x95\x06\x0a\x6c\x6f\xab\xc1\x1c\x0c\x06\x83\x19\x0b\xf1\x20\x16\x7c\x10\x90\xf1\xc0\xb8\x2f\x7a\x21\x8a\x38\xf6\x63\x0f\x2f\x7c\xd1\xc3\x76\x34\x06\xce\x5a\x0c\xd0\x26\xc1\xdd\x28\x8d\x97\x91\xc4\xcb\x12\x4f\x0a\x71\x99\xad\xef\x10\x42\x14\x45\xda\xdb\x38\x71\xe3\x9b\x6b\x86\xd6\x1a\xd4\x3d\x0a\x62\x2c\xf6\x2b\x50\x74\xe7\x58\xa9\x64\x35\x7b\x71\x3f\x83\x8c\x36\xd8\xf4\x1b\x01\xf1\xa4\x89\x6a\x30\x50\x2b\x80\x2a\x9b\x70\x3c\x37\xee\x6d\x77\x2e\x7e\x78\x31\xd4\x12\x7e\x8c\x81\xdd\x63\xfe\xc0\x89\x94\x98\xf6\xe1\xc8\xd8\x7a\
x5a\xea\x47\xf5\xe3\x37\x80\x09\x85\x01\x96\x9e\xa5\x96\x81\x42\x6b\xb0\xd7\x13\x44\x62\xad\xeb\x69\x46\xc0\x42\xbd\x16\x0b\xcc\xc1\x63\x3e\x7e\xb4\xb4\x98\x91\x7b\xfc\x78\x3f\x9b\x01\xb2\x92\x97\xad\xa2\x69\xa3\x8f\x8d\xdc\xe3\x8d\x7b\xd8\x94\x90\x22\x34\xc6\xe7\xf4\x35\x22\x41\xcc\x71\x1d\x67\x8d\x19\x0b\x30\x2a\x97\x62\xc5\x03\xa3\x22\xdc\x65\x98\xe2\x8f\x19\xd6\xe1\x24\x86\xb4\x35\x1c\xa7\x04\xd2\x29\xfc\x37\xc6\x4a\xfe\x01\x99\x54\x99\x92\xfa\x95\x39\x4c\x10\x09\xec\xe6\xe7\x76\x2e\x4d\xf9\x6a\x7a\x6f\xf5\x81\xe0\x6d\x1f\x46\x58\x6a\xa8\x92\xc1\xad\x9a\xbf\xdb\x7a\x8f\x89\x11\xb6\xe6\xa4\x23\x45\x89\x50\x1f\x47\x98\xfa\x98\xca\xd4\x3c\xb5\x78\xae\x65\xb7\x7b\x22\x16\x5e\xce\x0f\xea\x36\xba\x4a\x53\xaf\xc1\xd2\x53\xe4\xad\x43\xbc\x8c\x9d\x67\x2d\xbf\x77\x28\xba\xd4\x1b\x08\xec\xbc\xbb\xdc\x75\x6e\x00\x25\x68\xec\x7e\x5a\x01\x55\x83\x1b\x61\x7e\x8c\x8d\x1f\xf5\xfd\xf1\xeb\xce\x6a\xfc\xfb\xed\x99\x6a\xe9\x97\xde\x34\xc9\x3d\xfe\x16\xb7\xcc\xfe\xc7\x30\xd8\xb7\x3f\x92\x7b\x6c\x7e\x52\x9f\xd2\x5f\x6b\x9c\x6d\x1b\xd9\x6b\xd5\xa3\x85\xef\x5a\xf5\xf2\x7f\x65\x20\xb6\xb3\x88\x8a\x4a\xb9\xf9\x39\x59\x58\x25\x47\x84\x11\x19\xeb\x94\xd2\x7a\xe4\xa7\x49\x00\x52\xf9\xb0\xdb\x7b\x67\xfe\xe5\xc0\xad\xec\x99\x31\x9a\x47\x3d\x9c\x42\x74\x99\x8e\x89\x62\x93\xb5\xce\x4b\xd5\x91\xa9\x7b\x7a\xae\xb7\x15\xdd\x35\xb6\x75\x93\x9b\xa6\x7a\x77\x84\xf2\xc9\xaf\x66\xcc\xa2\x21\x98\x3f\x18\x77\x1a\x8a\x64\x56\x47\xc0\x7d\x1d\x3a\xec\x33\xa0\xac\xda\x4f\x04\x40\x6d\x5c\x2f\xa6\x7e\xa2\xc5\xd8\xb0\x12\xc2\xa8\x91\x44\x08\x04\x0e\x89\xc7\x02\x46\xfb\xf0\x2e\x0e\x24\x89\x2a\x8d\xfa\xcc\xe4\x24\x47\x3e\xce\x5c\x57\x8c\xcf\x28\xb6\x6c\xa8\xe4\x97\xc0\x4a\x77\xd1\x0a\x0d\x46\xde\xac\x06\xe6\x22\x22\x6f\x30\xd7\x7a\x15\x52\x6a\x91\x36\xfc\x4c\x64\x62\x4e\x1e\x0a\x4a\xa2\x08\xd7\x4d\x80\x66\x50\x73\x8a\x94\x92\xbf\x8e\x24\xd0\x68\xcf\xad\x5a\xac\xd4\x79\xd8\xb2\x8a\xfd\xd6\x01\x7c\xaa\x01\xb9\x95\x70\xa5\x7a\xd1\xfc\x49\xb0\xd8\x3a\x80\x6b\xfb\xdb\x8b\xad\x7d\xfb\xe9\x65\xf2\xe9\xc7\x7f\xea\x7f\x7e\
xda\xaa\x12\xa1\xea\xf9\x00\x9f\xe1\xf3\xda\x7c\x48\xae\x51\x6b\x65\x03\x96\x52\x38\xa0\x49\xe9\x80\xb6\xae\xc7\xf6\xca\x87\x61\x91\xdf\x11\x27\x6a\xe4\x4f\x51\x03\x19\xe5\x31\x6c\xe1\xf3\x49\xf5\x90\x77\xa9\x16\x62\x58\xf5\xde\x82\x29\xda\xef\x15\xb8\xee\x28\xa1\x71\x8f\x02\x65\x35\x58\xdd\x59\x6f\x49\x1e\x0b\x43\x44\xfd\x03\xb8\x1d\x9d\x5c\x69\x58\xbf\x6c\x69\x40\x5b\xff\xbc\xdd\x7d\xfc\x51\xbe\x8d\xa5\x5c\xce\x1c\x6e\xbb\x06\xb5\xf3\x5f\x3c\x7d\xc8\x06\x73\x36\x4f\xb7\x92\xac\x06\x77\x6d\x70\x08\xc1\x3c\xa2\x23\x08\x4d\xa2\x06\xd1\xe7\xd8\x7d\x03\xb5\x04\xb3\x3b\x3c\x4f\x1d\x9a\x63\x2c\x1f\x30\xa6\xf0\x42\xab\x42\x3f\xff\x08\xde\x0c\xe9\x28\x0e\x2e\x20\x60\x74\x6a\x82\xc3\xf4\xdb\x4a\xab\x62\x3c\x2c\x1b\xab\x59\xb3\x09\x0b\x02\xf6\xa0\x08\x81\xe3\x69\x1c\x20\x9e\x11\xd9\x07\x10\x7d\x3a\x0d\x3e\x47\x9f\x4e\xd9\xe7\x4f\xcf\xf7\x7f\x7e\xf9\xd9\xe0\x57\x4d\x18\xab\x63\x68\xf1\x29\x81\x59\x8f\xe1\x75\x8a\x62\xf4\xe9\xec\xf3\x4d\xef\x83\xc2\xf4\xc7\xcf\x70\xc6\x20\x64\x1c\x2b\xa1\x5c\x66\x75\xfd\xf8\xd2\x2d\x87\xdd\x5a\x92\x25\xf1\xdd\x06\x31\x25\xf7\x98\xea\x65\x59\xa4\xda\x25\x28\x36\x22\xd3\x47\x3b\x70\x2e\x34\x8c\x55\xfc\x37\x15\x2d\x9b\xdc\x37\x17\x64\xda\x79\x6f\x3a\xef\x0d\x7c\x9f\xde\x1b\x45\xdd\x47\x01\x51\x53\xb6\x06\xe7\x8d\x82\xd6\x39\x6c\xcc\xf3\x54\x8f\xf9\x8d\x18\xec\xce\xf8\xcb\x9f\xee\x8c\xff\xbb\x3f\xe3\xff\x9b\x79\x52\x57\x38\x7d\xbc\x20\xd3\x6f\xdc\x8f\x1a\x91\xa9\xf9\x25\x22\xd3\x7e\x26\x24\xb6\xf3\xa3\xd6\xf8\x51\xad\x3e\x50\xef\x9c\xfb\x2e\x1d\xac\x66\x4f\xec\xfc\xab\x9d\x7f\xb5\xf3\xaf\x76\xfe\xd5\xce\xbf\xfa\xe4\xfd\xab\xca\xd0\x4c\xdd\xab\xda\xb5\x7a\xad\x61\x7c\x58\x87\x67\x35\xe2\x58\x0b\x62\xa5\x54\x8c\x74\x32\x60\xa5\x9b\xb5\x5c\x56\x2e\x28\x75\x0b\xf0\xda\xba\xa1\x94\x98\x64\xf6\xaf\x64\xcf\xc9\xe2\x67\x32\x73\x24\x8e\x6e\x88\x5f\xba\x0b\x0d\x27\x3a\xf5\x28\x91\x67\xfb\x26\xba\x91\x8d\xe1\x81\x04\x01\x08\x89\xb8\x74\x99\xba\x3a\xff\x97\xda\xb5\x72\x79\xaf\x8f\xf4\x7c\xb5\x64\xd4\x76\x0c\x5a\xc9\x98\x0d\xf4\xdc\x86\x11\xd5\xf6\x21\xd9\xe3\
x1d\x94\x0e\xcc\x4a\x3e\xca\xca\xc6\x8d\x6e\x4a\xdd\x72\xf3\x9e\x4a\xed\x2b\xba\x42\x6b\x4a\xc1\x3b\x4a\xc0\x2d\x63\xee\xd8\xb1\x1a\x5c\x40\xa2\xa9\x71\x7e\x49\x89\xbc\x99\x91\x10\x44\x18\x51\xd3\xf9\xa1\x3a\xdf\xf2\xb7\xe1\x5b\x7e\xb2\xee\x42\x27\x91\x3a\x8f\x61\xf9\xd3\x79\x0c\xbf\x7b\x8f\x21\xd3\xa5\x6f\x5e\x9b\x32\x3b\x6b\xf1\xe0\x9c\x67\x20\x2e\xcb\x4b\xa6\xdc\x8f\xae\x19\x38\x23\xde\xcc\x8a\x42\x83\xa3\xd1\xe9\xc6\x55\xd3\xe3\x13\x11\x05\x68\xae\xec\x6f\x57\xcc\xcf\xee\xa5\x3e\xf3\x74\x46\x7a\x5a\x17\x47\xb8\xca\x05\x16\x74\x05\x48\x83\x4d\x39\xf1\x74\x8e\xd9\x56\x8e\xd9\x2a\x36\x79\x6f\x1d\xb3\x02\x4b\xb7\x4c\xd7\xc2\xba\x38\xd2\xb9\x4a\x0b\x0d\x19\x05\xd6\x1f\xf7\x09\xb3\x25\x6d\x62\xce\x31\x95\x03\xf1\xdf\x60\x20\xb0\xec\xd9\xc6\xfd\x99\x0c\x83\xdd\x8a\x4e\x4f\x72\xb6\x96\xae\x80\xa3\xe9\xa4\xe7\x5a\xc3\x24\x40\x53\x97\x49\x66\xd1\x3a\x3a\x1d\xfe\xad\x3c\xa5\xa3\x7f\x9d\x7e\xdf\xfe\x50\xb7\xe9\x77\x2e\xd1\xce\x25\xda\xb9\x44\x3b\x97\xe8\x97\x75\x89\x46\x73\x11\x21\x7e\xf7\x78\x57\x4c\x02\x67\x25\x5f\x4c\x75\xeb\x46\x67\xcc\x7c\xa4\x9a\xae\xc1\x1b\x53\x2d\xfd\x7a\x3a\x13\xf9\x62\x2e\x67\x8c\x56\xed\x70\x7f\x87\xaa\x5c\x75\x15\x6b\x94\x2c\xb2\x72\x94\xf1\x4c\x31\xae\xba\x04\xfa\xe6\x62\x5c\x88\xfa\x5d\x35\x2e\xf7\x7c\xaf\xd5\xb8\x7a\xca\xa0\xb5\xc5\xb7\xf2\x65\xb6\x2a\x20\x2e\x5b\x7c\xab\xac\xcc\x56\x0d\xe8\xae\xf8\x56\xf6\x79\x1a\xc5\xb7\x9c\xcc\xca\xd6\xd1\xd2\xb5\x80\x7d\x6b\x1f\x55\x8c\xb2\x8d\xac\xaa\x29\x98\x55\x35\xbd\x5d\x81\x25\xf3\x7c\x93\x61\xba\x0d\xf5\xb4\xcc\x3e\xdf\xd5\xd3\xfa\x96\x3c\xe7\x89\xfe\xd8\xb9\xce\xcb\x9f\xce\x75\xfe\xdd\xbb\xce\x17\x6c\x94\xf5\x55\x78\xaa\x37\x7d\xa0\xbe\xe6\x75\xd1\xcd\xa7\xab\x6c\x59\x21\x6b\x6a\xf2\x56\x69\x8e\xb1\xc0\x4a\x43\xcc\x92\xc0\x3b\x9b\xeb\x85\xa0\x1f\xcd\x75\xf3\xb5\x30\xe4\xdf\xd5\x1f\xbe\x44\xa0\xb2\xb1\xb3\x9f\x7e\xb0\xb2\xde\x09\x4c\x44\xb2\xfe\xd8\xb3\xc7\xd5\xa2\xaf\xbe\x2b\x8b\x3f\xae\x00\xba\xb6\xa8\xe4\x28\xcb\x3d\xeb\xd1\x93\x72\x0c\xb9\x82\x0b\x20\xad\x39\xca\x26\x59\x5e\x4c\xac\xbb\x2a\xd2\
xb5\x31\x61\xd6\xe9\x32\xe1\x28\xc4\x4a\xc5\x2e\x1a\xfc\x89\x69\x1f\xcd\xf7\xa1\x8f\xa7\x26\xf7\xb2\x02\x68\x67\xe0\xdb\xfb\x69\xfc\x38\x20\xb4\x44\xaf\x5a\xc6\x11\x37\x4a\xe0\xac\xe2\x88\xab\x69\x5d\xe3\x88\x4b\x71\x4f\x04\x06\x2a\xd7\x95\xd6\x18\x1e\x15\xa2\x8f\x36\x1a\x46\x5c\x60\xfe\x86\xc5\xbc\x8e\xb3\x08\x95\x78\x5a\xc1\xea\xee\x52\x17\x42\xe5\xcf\x3f\xb5\xdb\x12\x8b\x5d\x2f\x23\x98\xdf\xa1\x8f\x24\x8c\x43\xa0\x71\x38\xc6\x5c\xef\x8a\x44\x09\xe5\x08\x73\x7d\x7b\x08\x20\xbb\xdb\x55\x50\x93\x95\x96\x1c\xeb\x78\x42\xec\xab\x2d\x12\xa9\x3f\xe3\x40\x9f\x09\x59\xc3\x05\x7f\xd4\xb7\xdc\x19\xb1\x4c\x19\xed\xfd\x85\x79\xd5\x46\xab\x44\x1d\x8c\xf1\x84\x71\xec\xae\x20\xe1\xd8\xb1\x33\x22\x81\xda\xca\x0f\xf5\x2f\x49\xe7\xf6\xd7\x4a\x1f\x8d\x9c\x71\x24\x66\x3a\x9e\x27\x87\x92\xc8\x23\x64\xba\xfe\xc9\x4e\x81\xfa\xa9\x32\xe3\x31\xb9\x73\x05\x1e\x08\xf5\xd9\x43\x3a\x93\x49\x20\xd2\x8b\xe7\x6b\x2a\x94\x99\xac\xef\x15\x93\x28\xf8\x1a\x84\xa5\x3b\x5e\x07\x59\x11\x0a\x52\xc1\x7a\xa2\x64\xb5\xb0\x86\x2f\x7f\x7a\xfc\x22\xae\xe7\x4c\x63\xf4\x88\x13\x8d\xca\xb6\x4d\xe7\x19\xeb\x3a\xcd\xe8\xce\x23\xba\xf3\x88\xaa\x31\x76\xe7\x11\xdd\x79\xc4\x37\x4b\x75\xdd\x79\x44\xc9\xa8\x3b\x07\xb5\x79\xbe\xc7\xf3\x08\xa3\x11\x74\xc7\x11\x8b\xef\x3d\xd5\xe3\x88\x51\x77\x18\xd1\x1d\x46\x74\x87\x11\xdf\xf6\xed\x1e\x8b\x05\x1b\x5a\x5f\xf0\x51\xe5\xa5\xce\x06\x75\x7e\xaf\x17\x7c\xd4\xde\xef\xb1\x38\xa7\x8d\x57\x7c\xac\xc5\x01\xdf\x9d\xee\xe4\x9e\xc5\xd3\x9d\xee\x6c\x27\x5d\x9d\xe5\xbc\x4c\x97\xeb\x71\x33\x5d\x3e\xc6\xcf\x54\xda\xb8\x95\xa3\xe9\xf2\x0b\xc4\xcd\x5e\x76\x21\xb3\x9d\x8b\xaa\x38\x31\x9d\x8b\xaa\x73\x51\x7d\xb7\x54\xd7\xb9\xa8\x4a\x46\xdd\x39\x2d\xcc\xf3\xa4\x9d\x16\x97\x9d\xd7\xa2\xf3\x5a\xfc\xbd\xbd\x16\x97\x6b\x37\x9f\x2b\x15\x60\x58\x21\x70\xf2\xd2\xc5\x4c\x2a\xf3\xa6\x62\xdc\xd5\x31\x93\x97\x5d\xc8\x24\x7c\x59\xa3\xfa\xb2\xb3\xaa\x57\xb0\xaa\x47\xff\x0d\xd6\x63\x57\x1b\x40\x2b\x5b\xd6\x15\xcd\x5b\xd9\xd6\xa3\xff\x06\x1b\x8f\xe3\x78\xf2\xe7\x5d\x4a\xec\xf8\x3e\xf6\xab\x6f\xda\x50\x8f\x39\
xe2\x4a\x5c\xa8\x9d\x9a\xf8\xc4\xd5\x44\xc3\x16\x9d\xa2\x58\xfe\x74\x8a\xe2\x77\xaf\x28\x76\xca\x4f\xfe\xa9\x50\x7e\x60\xf4\xaf\xd3\x6d\x61\x3e\x2b\x31\x51\xad\x0b\x55\xf9\x22\x52\x0d\x69\x19\x5d\x68\x1d\x72\xa3\xab\x90\xf4\x44\x2a\x24\x65\x76\x9c\xae\x46\x52\x57\x23\xa9\xab\x91\xd4\xd5\x48\xea\xca\xc6\x97\x25\x7e\x3d\xa9\xb2\xf1\xc9\xfe\x9f\x16\x8f\x1f\x9d\x5c\xd9\x02\xf2\xeb\xbc\x9b\x53\xe8\x2a\xef\x55\xae\x82\x76\xd7\x6a\x9a\x4a\xf1\xf5\x66\x7e\x5e\x30\xaa\x5e\x81\xd8\x3f\x88\x9f\x44\x05\xc5\x94\xfc\x37\xc6\x80\x42\x46\xa7\x80\x82\xb2\xd3\x90\x3f\xd9\xd8\x65\x3a\xe9\x99\x92\x76\xaf\xcb\x01\x56\x12\x48\x2b\x55\x48\x40\xc4\xf1\x84\x7c\x74\xe6\x51\x09\x44\xe2\xef\xab\xf7\x94\xed\x74\xab\xb4\xe6\x9e\x6f\x65\x55\xcf\x15\x96\xef\x29\xb0\x3d\xe2\xdf\x9a\xfb\x27\xcd\x29\x31\xa1\x65\xe5\xf7\x4b\xe0\x4f\x08\x0e\x7c\x98\x70\x16\x02\xd3\xb5\xa4\x15\x34\x91\x1f\xbb\x55\x47\x80\xd1\x60\x0e\x01\x96\xfa\xd6\xcd\x1d\xd4\xfb\x6b\x1f\x0e\x7b\xff\xd9\x2d\x13\x3e\x26\x53\x49\xc0\xce\xf3\xde\xff\xed\xee\x43\x4c\x7d\xcc\x85\xc7\x38\x16\xb0\x73\xb3\x6b\x70\x9c\xcd\xa3\x19\xa6\x02\x76\x7a\xbb\xda\xc3\x45\x99\x34\xa5\xf3\xcb\xc3\xac\xd4\x76\xa3\x45\x7a\x0a\x0c\x18\xb7\x50\xac\x27\xd4\x63\x54\x58\xd5\xc0\xdd\x18\xfa\xa3\xee\xeb\x1f\xcf\x4b\x20\xa6\x77\x88\xae\xe8\xc8\x59\xbc\x3b\xb6\x42\x4c\x34\x0b\x87\x0a\x92\x2e\x9a\xf9\xc5\x3b\x62\x6b\xac\xf0\xfa\xcb\x61\x73\x94\x59\xf0\x36\xdb\x96\xb6\x08\xae\x56\x6d\xd0\x54\x5f\x64\xaa\x80\x05\x81\x21\x73\xe3\x3b\x8c\x85\xa6\x06\x8f\x63\xfd\x82\x55\xe2\x1d\x6d\xe6\xc0\x12\x2a\x24\xa2\x1e\xb6\x97\xd2\xc2\xde\xde\x1d\x9e\x8b\xbd\xbd\x3c\x91\xbd\x50\x9d\xe4\x2e\x78\x6d\xb8\x7d\x56\x32\xb8\xbe\x7c\x7d\x04\x2f\x9e\xff\xf8\x8f\xb4\x8a\xeb\xc3\xc3\x43\x9f\x60\x39\xe9\x33\x3e\x1d\xf0\x89\xa7\xfe\x53\x6f\xf4\xe5\x47\xb9\x9b\x62\x60\x44\xdf\xde\x5e\x0e\xa2\xb5\x3a\xf4\xce\xb6\x0f\xe3\x58\xee\x03\xd1\xd7\x45\x08\x4c\xe5\x7e\x7b\x74\x73\x40\x33\xd7\xd2\x2e\x8f\x6e\xe5\xdd\xb3\x2d\x6e\x9d\x4d\x56\xfa\x87\x65\xa9\x9b\x79\x3a\xd5\x77\x91\xbe\x1b\xc9\xd4\xb6\xac\x22\
x54\x4d\x9e\xf6\x9d\xc4\x3b\xc1\xb1\x60\x31\xf7\xf0\xb2\x68\xea\x2e\x97\x46\xf1\x0c\x85\xb8\x0a\xbd\x6d\x53\x57\x5a\xcb\x3b\xeb\xd1\xb2\xb8\xe5\xa2\x4f\x1d\x91\x27\x33\xbc\x5f\x8c\xd1\x30\x50\xc7\x46\x8d\x75\x4b\xed\x05\x2c\xf6\xad\x07\xa4\xef\xb1\x70\x80\x22\x22\x06\x3e\x16\x64\x4a\x07\xae\xa3\x1b\xbd\x39\xf7\x61\x0f\x5e\x17\x84\xe1\x6d\xc4\x99\x92\x2e\xa2\xcf\xf1\x94\x30\x2a\xfa\x0f\x05\xc3\x4a\xdc\xee\xe7\x26\x34\x87\xb4\xc3\x35\x07\x73\x66\x4f\x95\xd2\x9b\x91\x5d\x7a\x6b\xd2\xdb\xe0\x93\xfd\x74\x43\xfc\xcf\x03\xdb\xf5\xe0\x93\xf9\xf0\x79\xb0\x80\xc3\xe0\x93\xeb\x49\x35\xc8\x5f\x16\xa0\x07\x95\x19\x88\x23\x85\xe5\x86\x92\x17\x02\xb6\xc1\xf2\x43\x49\x3a\x1f\x7c\x72\x1f\x9b\x87\xb3\xbd\x2c\x91\x2a\x9b\x25\xc4\x4a\x3c\x2c\x92\x6a\xd1\xe2\x2b\xba\x7c\x92\xa6\x2d\xe4\xbe\x9b\x86\xb4\x3f\x78\x98\x31\x81\x9d\xa3\x05\x71\x0c\x22\x1e\x0b\x49\x64\x5c\x8c\x6d\x4b\x22\xda\xd2\xbd\xe1\x77\xd3\x4a\xb1\x68\x06\xa2\x53\x88\x22\xce\xee\x89\xaf\x24\xcd\x0c\xd3\x6a\xea\x22\xc2\x8a\x7e\xa9\xe5\xd2\xd2\x72\xa8\x85\xb9\xd1\x64\x60\x94\x98\x14\x95\x0a\x7d\x83\xdb\xa2\x74\x39\xaa\x7c\x07\x3d\x4d\xb0\x85\xaf\xb4\xca\x95\x6f\x5d\xed\x3d\xc8\x2e\xf4\xa3\xf4\xe0\xe3\x14\x50\xbd\x32\x9c\x52\xd3\x2b\x4e\xf0\x24\xfb\xa3\x13\x22\x09\x2d\x58\xb5\xab\xdc\x5f\x80\x3f\x7a\x18\xfb\xf0\xe2\xf9\xcb\x9f\x1e\xaf\x6a\x99\xc7\x4c\xdd\xa3\x6e\x91\x7a\xbd\x38\xfb\x0b\x53\x90\xf1\x94\x5c\x20\x39\x13\x4e\xef\x31\xdd\x1b\xfb\x3f\x37\x0f\xc0\xb1\x0e\xbf\x2a\xf5\x44\x1d\x5a\x25\x9b\x08\x05\x84\x3d\x58\x3d\x2a\x8a\x30\xe2\x6a\x63\x40\x12\x42\xa6\x94\x56\x9a\x81\xb8\xbd\x2d\x9c\x97\xab\x52\x6b\x8f\x14\x6e\xfd\x04\xbe\xfa\x53\x75\x22\x48\x48\x02\x03\x5a\xcc\xa9\x44\x1f\x75\x77\x60\x77\x9c\x88\x33\xc9\xc6\xf1\xa4\xaf\x27\xe2\x1d\x12\x77\x65\x38\x2b\x01\x6d\x5d\x24\xfb\x80\xb2\x1d\xe8\xc1\x73\x3c\xc1\x1c\x53\x0f\x1b\x61\xfb\x97\x42\xdd\xbc\xa3\xfd\x29\xa5\xea\x9f\x5d\x29\xcb\x46\x6a\x7c\x56\x7b\x04\x81\x03\x1d\x8d\x06\x0f\x2c\x0e\xfc\xbc\xff\x07\x09\xb8\xd5\x53\x1b\x62\x2a\xfb\xb6\xc5\xc8\x36\xe8\xab\x8e\x4b\xf3\x2b\
x0e\x03\xc1\xf6\xb3\xd3\xa4\xf5\xa3\x04\x6d\xb7\x92\x69\x61\xfe\x74\xbf\x30\x73\x56\x66\xba\xef\x39\x79\x48\x28\x84\x28\x4a\x74\xae\x04\xac\xd6\x7f\xef\xf0\xfc\x00\xf6\xac\x5a\x76\xbd\xbd\x7d\x87\xe7\xdb\xdb\x1f\x60\x0f\xaa\x87\x61\xff\x3e\xcd\x37\x29\xc5\x20\x05\x12\x22\x8a\xa6\xd8\x3f\x32\x6d\xfb\x1b\xea\x4f\xa9\xfa\xd7\xdb\xdb\xd6\xc6\xdc\xde\xfe\x50\xd2\xd1\x6f\xca\x1c\xb0\x16\xaf\x36\x0d\x34\xe1\x7a\xa5\x17\x20\x2d\x4c\x97\x85\x7c\x50\xda\x97\xb9\x9d\xf6\x37\x36\xee\x87\x4d\xb9\x2a\xa5\xcd\x8d\xef\xad\x9f\xf5\xf7\x97\xbe\x18\xcd\x5d\x5e\x5f\xbf\xb9\x1c\x74\x45\x5f\x09\xaa\x99\x83\xfb\xeb\xe7\x1f\x1a\x5e\xce\x84\xc2\x5f\x3f\x6f\xbb\x04\x69\xf3\x49\x43\x47\x99\x81\xe5\x4b\x55\x55\x75\x36\x54\x3b\xa5\x5a\x4c\x8e\x23\x63\xd2\x59\x56\x29\xa5\x75\x04\x7f\x61\xce\x7a\x63\x24\xb4\x8e\xeb\xe3\x0a\xbe\x59\x44\xcc\x95\xf1\xe8\x23\x3e\xb5\xd8\x9f\x6b\xff\x03\x4e\x8e\x0a\xeb\x47\x9e\x6e\x99\xcb\x12\x70\x76\xf2\xa7\xd5\x4b\x64\x69\xa7\xe0\x1d\x5c\xbd\xb3\x3c\x05\xd7\xf2\xa7\x12\x6b\x25\xf0\x87\x52\x9b\xa6\xc6\x4f\x02\x11\x13\x82\x8c\x4d\xbc\x58\xb2\x6d\x90\xbf\xb0\x11\x4d\xca\x06\x2d\x2c\x61\x99\x77\x49\x33\x2d\xe1\x80\xa9\x24\x1c\xcb\xb9\x0d\x1e\xd7\x9e\x1e\x42\x7d\x72\x4f\xfc\x18\x05\x0a\x64\xa2\x41\x6a\xef\x92\xfb\xa5\x0c\x66\x6b\x0a\xea\xe7\xb7\x99\x9c\x18\xae\xdf\xf4\xb4\x1e\x4b\xe8\x3d\x0a\x94\xe4\xe8\xb5\x15\x75\x25\x40\x7b\x4d\xb4\xb9\xe8\xc4\xfc\x3e\xef\x6d\x2c\x9a\xd1\x2d\x00\xd7\x9b\xd4\xe6\xa9\xf0\xb3\x26\x5a\xb4\xee\xd7\x18\xd9\x51\xee\x3b\xe7\x25\x2d\x19\x8e\x35\xf1\xee\xf0\xdc\xf8\x5a\x22\xa4\x80\x3a\xe7\x56\x16\x90\xa6\xd9\x7d\x7b\xc3\x93\xa2\x99\x08\x89\x72\x98\x05\xd3\x67\xd1\xa4\x59\x30\x63\xd2\x6c\xd4\x05\x5f\x8f\x79\x72\x6e\x53\x0f\x45\x44\xa2\x20\x75\x9f\x6a\xcf\x69\xd1\x4f\xaa\x46\x93\xf1\x95\x96\x00\x4d\xbc\xa7\xa1\x55\xbc\xed\xd5\xa3\xd6\xd9\x63\xe0\x19\xdc\x42\x5b\x7e\x29\xc0\x74\x6a\x34\xc3\x9f\x36\xe2\x05\x35\x8f\xe6\xc5\x05\x7f\x91\x9d\xdc\x25\x42\xfb\x7e\x4f\xe0\x54\xbf\xd9\x64\x99\xd5\xc0\xa8\x30\x75\xd2\x16\xc0\xe3\x4c\x7c\x5b\x14\x05\xc4\x45\
xb8\x11\x91\x51\xcf\xcb\xd6\x46\x53\xdb\x46\x63\x02\x39\x9e\x96\x6f\xb1\xcb\x9f\xdd\x5f\x2a\x50\xf5\x2f\xb6\x9f\xe6\x6a\x60\xb9\xf9\xce\xcc\xb2\x51\x19\xd4\x74\xe3\x69\xac\xac\x95\xf4\x04\xf9\x4b\x1d\xe7\xeb\xc9\x5c\xf9\x38\xdf\xb6\x5e\xeb\x71\xfe\x65\x0d\x46\x50\x2d\x4c\x2f\x4f\x5e\x96\x4d\x63\x12\xb9\x63\x59\xb3\xee\xfc\x3d\x27\x36\xb7\x85\x25\x65\x2d\x46\x4c\xf5\x36\x2d\x6e\x42\x24\xbd\x99\x75\xc8\x4d\xf1\x47\xa8\xcc\xef\x06\xbd\x59\x89\x54\xad\xd8\xd1\x4e\x27\x7d\x86\xaf\x81\x58\x57\x94\x96\x5f\xf1\x64\x42\x3c\x82\xa9\xdc\xad\x0e\x21\xeb\x4e\xa9\x8d\xfa\xb5\x1e\xde\xff\xbd\xfa\xc0\x77\x05\xe6\xaf\x81\xd6\xc8\xfd\x28\x09\xe5\x71\xae\x91\xba\xcb\xfc\xd6\x2e\x02\x6a\x4e\xbe\x9b\x25\x40\xdd\x8a\xc0\x6a\x02\xa0\x66\x2a\xa1\x9a\xff\x4f\x4b\xa7\x30\x39\x58\x49\xbd\x75\x1d\x7b\x15\xde\x4f\xec\x86\x16\x67\xb8\x45\xb7\xbc\x6b\x5a\xf6\x4a\x05\xf3\x94\x34\xa9\x58\xd2\x62\xd3\x6c\x1d\x5e\x42\xcd\x91\x46\x21\x8a\xba\xe5\x1a\x56\xd1\x75\xc1\x70\x2a\x4e\x7d\x8d\x88\x29\x4c\xcc\x51\x1e\x50\xd5\xcb\x0d\x53\x54\x0f\xa5\x32\xce\x35\x71\x25\x9a\x38\xca\x19\x63\x42\x29\x73\x88\x4f\xb1\x4c\xfc\x8d\x36\xda\x62\x91\xcf\x12\xb1\x14\x62\x89\x7c\x24\x91\x8d\xdb\x70\x40\x89\x00\xac\x98\x4b\x5b\xb5\xd6\x07\xac\x23\xdd\x75\x0a\x6d\x59\x00\x07\x11\x26\x7f\x5a\x16\x0e\x3e\x96\x58\x30\xa8\x91\x63\x3d\xa8\xb3\x6f\xeb\x44\x58\xae\x5d\xb5\x49\x59\xb9\xa7\xb4\x0f\x77\x6a\x6b\x9b\xe6\xa8\xa7\xd2\x5e\xaf\x89\xdb\x71\xcb\x6b\x1c\x94\x7d\x07\xca\xa8\x2e\x33\x74\x8f\x2b\x82\x76\x20\x13\x2a\xa1\x95\x93\x32\x39\xb9\x94\x84\xfc\x8b\xd1\xd2\xfb\xe1\x97\x9b\x8a\xff\x94\x7b\x81\x6a\x42\x3e\xb4\x1f\xfe\x61\x86\x79\xe6\x70\x3a\xe2\xcc\xc3\x42\xb8\x18\xcf\x8a\x9d\xf5\x2a\x67\xdd\x80\xcf\xb0\xd0\xca\x19\x9a\x4c\xb0\x67\x08\xdd\x70\x41\xe6\x14\xc8\xce\x77\x1f\x86\xe5\x17\xcc\xc7\x34\x71\xe3\xef\xa7\xc7\x04\xb6\xf1\x84\x70\x91\x72\xa4\x9e\x76\xe7\x8a\x17\xe5\x3c\x6f\x1e\xeb\x15\x78\xd4\x12\xe5\x3d\xe7\xab\x4b\xba\x77\x39\x38\x2b\x0a\xba\x5a\x20\x85\xb0\x67\x37\x5d\x5a\xb8\x11\xe1\x06\x52\x8c\xba\xd9\xa4\xa0\x29\x71\
xf2\xf4\x6c\x6a\xc0\xf2\xd2\xe7\xac\xd4\xe7\xb4\xa2\xc8\x28\x75\x3f\xb5\x12\x18\xda\x89\x63\x22\xf1\xd4\x1e\x62\xe3\xfc\xbc\xd2\x15\x31\x8f\x6e\xe1\xe2\xa3\xd4\x6e\x6c\xdd\xe1\x51\x84\xa9\xaf\xe8\x18\x01\x47\xd4\x67\xa1\xb1\x6e\x3e\x36\x3b\x8b\x0a\xee\x22\xa5\xc8\xf1\x9e\x87\x04\xce\x05\xdc\x95\x7b\x8c\x6c\x20\x5d\x29\x54\x1d\x5c\x67\xb3\x84\xa7\x24\x89\xce\x36\x40\x17\xc2\xee\xd2\x20\x3b\x1b\x56\x57\x0a\xb3\x2a\xd4\xee\xa5\xc6\xe6\xc7\x7f\xd4\xba\x94\x96\x14\xa5\x5e\x65\x92\x5c\xc3\x06\x55\x24\x92\xaa\x4c\xb6\xd5\xf8\xb4\x12\x5c\x0b\x72\x6b\xb8\x47\x61\xed\x09\x4b\x28\x96\x4c\x78\x28\x68\x48\x39\x6c\x95\x01\x57\x2c\x9d\x53\x04\xdd\xd4\x6c\x85\x49\x6e\xdb\x47\x95\x4e\x98\x36\xcf\x26\x23\x6a\xc3\x88\x05\xc4\x9b\x67\x42\xe6\x2a\xed\x88\xc4\xb5\x9c\xec\x79\x4e\xb9\x48\x77\x4a\xdb\x0f\x06\x32\x31\xce\x42\x7d\x5e\x51\x6d\xe0\x08\xb5\x41\x62\x59\x65\x94\xad\x60\x92\x35\x9b\xac\x60\xc7\xbc\x4e\xc3\xca\x9a\x43\x1a\x6e\xcd\xfb\x55\xf1\xe9\xba\x9a\x52\x66\x8d\xec\xa2\x68\xdf\x95\xd9\xd7\x6a\x80\x42\xba\x20\xe7\x4a\x6c\xe6\xe2\xc4\x84\xcd\x62\xd4\x50\x4d\xc4\x17\xf1\xb5\x88\x0a\x16\xe3\x13\x17\x9f\x1d\x13\xdb\xb6\xab\x1d\x55\xda\x85\xd6\xcf\xe4\x31\xee\xc1\x6d\x36\x76\xd3\x04\x57\xa0\x88\x08\x1d\xd2\xe7\xb1\x30\x8a\x25\x1e\xdc\xbf\x18\x24\x51\x67\xd7\x69\xd4\xd9\x87\x4c\xd4\xd9\xb5\x0b\xe8\xbe\x31\xfd\x7d\x18\x64\x66\x43\xcf\x2a\xc1\xaa\xb1\x9e\x16\xd5\xf6\xb6\x16\xeb\xbd\x4c\x9c\xdb\x97\xe9\xf1\x8c\x49\x9c\x09\x89\x31\xcb\x57\xa8\xa4\x29\xcc\x0e\xab\xb1\xd1\x2b\xe0\x32\x6e\x6a\x21\x1b\xf4\x2a\xd2\x08\xcc\xb3\x92\xe3\xc2\x11\x6f\x1a\xc5\x52\xc7\x0f\xbd\x84\xaa\x0e\x12\xb4\x07\x87\x85\x19\xab\xa3\x7c\x7b\x72\x79\xb0\x18\x0b\xe6\x1e\x4c\x3d\x3e\xd7\x1c\xb1\x6e\x01\x7d\x52\x80\xbc\x01\xf9\xdc\xb2\x8b\x0a\xf1\x9c\xb6\x06\x81\xa5\x24\x74\x9a\x3a\xae\x1c\x6f\x7f\x59\x09\x39\xf5\xf0\x85\xff\x36\x14\x6f\xf1\xbc\x4a\x3f\x75\xcf\x6a\xa2\xf2\xd7\x7c\x07\x6d\x65\x66\x5e\x64\x1e\x05\x2c\xf6\xe1\xed\xbb\x11\xdc\xe1\x79\x92\x97\x1d\x0b\xad\x8e\xd6\x12\xe3\xc5\x31\xf8\x44\xdc\x65\x88\x4e\x4f\
x38\x0a\x82\x24\x42\x3f\x89\xe6\x69\x58\x01\xf8\xf2\x1c\xa8\xc7\x7d\x17\x8a\xc1\x91\x42\x9e\xbd\xc5\xad\x38\x4f\xe0\x60\x72\x4a\xe8\x5d\x05\xf7\xf9\x11\x23\x54\xae\x9f\xf7\xb2\x70\x37\xc2\x79\x2d\x3a\xa8\xa0\xa1\x0b\xc6\xe5\xc0\x8d\x3c\xaf\x92\x5a\xfe\xab\xa9\x11\x50\x67\x1a\x6d\x8c\x2f\x31\x55\x60\xde\x48\x19\x29\xdc\x0f\x3d\x0f\x57\x55\xa0\x36\x8f\x99\xd0\x31\x63\x01\x2e\x8d\x44\x4b\xd1\x2d\xac\xda\x62\x37\xcb\xb3\xe8\x70\xa2\x87\xbb\x6f\xb1\xd6\xc1\xff\x80\x34\xb0\x34\x51\x94\xd4\x6f\x7f\x11\xe3\x52\x00\xcb\x71\xa2\xc9\xd4\xc2\x1f\x25\xe6\x14\x05\x60\xb8\x42\xf4\xe1\xd8\x96\xf5\xa9\xbe\x70\xd3\x3c\x13\x14\x88\x8a\x3a\x3e\x6e\x36\x56\x60\xe7\x99\x9d\xad\x16\xeb\xd1\x50\x5f\x63\xf9\x0a\x1b\xab\x4a\x61\xb7\xc2\x75\x8b\xcb\x31\xf2\x95\x5e\xd9\x28\xcd\xf2\x54\x50\xcc\x1e\x09\x51\xa4\x0b\x2f\x30\x2e\xb3\x6f\x36\x2e\xd6\xfb\xcb\x53\xd1\x87\x3f\x48\x10\x18\xa7\x80\x0e\xfa\x8a\xe2\x40\xbb\x9f\xc9\xc4\xd2\xd6\x8d\x9a\xfd\x1b\x05\xfc\x06\x35\x51\xab\x36\x3b\xd4\x58\xd6\x4a\x02\x53\x0f\xe7\xe4\xd2\xfa\x44\xe8\xaf\x05\xc8\x1b\x10\xa2\x2d\xbb\xa8\xd9\x8a\xc5\x0c\x71\xec\xc3\x91\xd1\xf9\xe1\x84\x4e\x09\xc5\xce\xd6\x74\x4a\x4d\xe5\x7c\x97\xee\xbd\xe8\x2b\xe9\x3e\x84\x1a\xc1\x32\xbc\xd0\x64\xbf\x7e\xe9\x3a\xcc\x75\xf0\x08\xb9\x5a\xa7\xae\xd4\x32\xc0\x43\xc2\x4e\xfa\x48\xc0\x8d\x18\x86\x17\x4a\xf4\x70\x2c\x84\x92\xa7\xaf\xe6\xae\xfa\xff\x7e\x92\xe4\x58\x0b\xd6\x45\x33\x70\xac\x04\x91\x67\x93\x25\x4b\xa1\x1b\xa7\x9d\xc2\xa3\x16\xa2\x46\x0f\x47\x33\x1c\x62\x8e\x82\x54\xe6\x67\x41\x01\x12\x82\x4c\xa9\xad\xc7\x50\x5f\x0e\x21\x9b\x80\xa9\xdd\xfd\xb7\x0e\xbb\x1b\x12\xdd\xa8\x19\xb9\x4d\xb0\x57\x5a\x80\x87\x8c\x2f\xb2\x16\xe4\x18\x5b\x29\xe4\x6b\x42\x16\xf1\x98\x62\xf9\xc0\xf8\x5d\xf2\xb5\xfd\xdb\x8e\x5a\x2d\x1b\x9b\x4c\x7a\x6d\x56\xca\xc7\x11\xa6\x3e\xa6\xca\x1c\x4d\x0c\xca\xa4\x0a\x8d\xef\x42\xb4\xb4\xcc\xd3\x81\xb0\x0f\x44\xce\x58\x5c\xb7\xb9\x40\xf9\x34\x6e\x4c\xbf\x15\x98\xdf\x63\xee\x8a\xee\x35\x34\x72\x87\x8e\xdf\xd0\x26\xfa\xce\xa2\xdc\x96\x91\xb5\xe5\x92\x97\x93\x6e\xd4\x80\xa9\
x74\xf5\x48\x90\x5f\xed\x37\x03\x13\xac\x99\xe7\xff\x1d\x81\x31\x5c\x5f\x64\x1c\x0c\xee\xb7\x04\x7e\x9a\x6b\xbb\x90\x80\xe9\xbc\x35\x3e\xf3\xc4\x40\x48\xa6\xe6\xa0\xc7\xb1\x42\xe7\x5e\x7d\x74\x20\x9e\x39\x7f\x0a\xa2\xfe\x8d\xeb\xe0\xc6\xfd\xba\x5b\x13\xbe\xb4\x22\x19\x59\xde\x59\xbf\x09\x7a\x66\x00\xaf\xe8\xae\x2b\xac\xa0\xe3\x78\xcd\x8f\xb5\xeb\xa6\x9d\x7a\x4a\x4e\x84\xc8\x9b\x99\x4d\x32\x0c\x63\x4a\x5c\xfa\x67\x7a\x1c\x91\xc9\x00\x52\x5c\x5d\x0b\x35\x15\x3a\x37\x31\x27\x7a\x8f\xa0\x98\xe8\x64\x82\xdb\xcc\x0f\xb7\x40\x19\x87\xdb\xfc\xdb\xf5\xae\x2d\x22\xa0\x70\x84\xb9\x65\x59\x79\x2b\x19\xb5\x4b\x89\xb3\xb4\x57\x5b\x59\xcd\xcc\x80\xce\x26\x27\x12\xf0\x47\x22\x64\x6e\xd0\x08\xb6\x8e\x62\x21\x59\x08\x23\x8d\xa5\x5b\xa7\xad\x7a\x47\xa5\x26\xfe\xf7\x3a\x95\x69\x94\x8c\x4e\xb4\x26\xf8\xb4\xc9\x6e\xbd\xed\xa0\x16\x8e\xe9\xc0\xfb\x24\x00\x65\x57\xa7\x9f\xc5\x41\xa0\x94\xd5\x7d\x88\x10\x97\x04\xa9\x3f\x86\xfb\xa0\xf6\x83\x19\xe3\xf5\xf2\x58\x7b\x2e\x36\xe1\x61\x75\x29\xca\xd3\x80\x8d\x51\x30\xb0\xeb\xb6\xa2\xf3\xb4\x02\x98\x6a\xe0\x3e\x3f\x21\x2f\xa5\xe5\xcf\x41\x33\x97\xb7\x73\x90\x80\xbe\xb6\xc0\xc7\xbf\x72\x16\x47\x87\x93\x09\xa1\x44\xb6\x50\x0e\x1b\x77\xa9\xa2\x50\x2a\x76\xd1\xa6\xe9\x1a\x34\xfe\x65\xfa\xad\x50\x4b\x15\x08\xd0\x30\xc0\x01\x31\xfa\x10\x0b\x70\x4f\x62\x8a\x68\x3d\x17\x38\x15\x73\x03\x8a\x48\x7d\x08\xa4\x79\x7a\xe9\xf2\xd6\xbc\xd5\xc6\x80\x80\x2c\xa9\xd4\xbf\xd6\x7a\xef\x82\x1a\x52\x69\x68\x56\x57\xff\xcc\xd6\xde\x43\xd9\x45\x82\x6b\x85\x7c\x03\x50\x80\xa9\x5e\x69\xc7\x70\x6d\x05\x6d\xc2\xc8\x03\xa5\x66\x2b\xc9\x95\xcc\x54\x83\xe4\x05\x57\x74\x2c\xeb\x09\x72\xe1\x07\xae\xf4\x0a\xa3\xa5\xb2\xb8\x11\xb0\x93\xd5\x0a\x19\x3b\xb2\x8d\x09\xe5\xbf\x18\xc5\x62\x10\x8b\x9e\xa7\xb4\x3d\x14\xbc\xe8\xa1\xcc\x24\xe8\x8f\x3d\x8d\x42\xef\x45\xbd\xa4\x86\x3a\x69\xbd\xee\x5e\x72\x0d\xea\xe4\x3c\xac\xce\xa6\xb0\xa4\xbc\x87\x2a\x99\xdf\x92\x33\xda\x4b\xfe\x88\x93\x7b\x24\xf1\xf0\xe2\xfe\xe7\x5f\xf5\x72\xb7\x75\xbd\x2e\x7d\x84\x5c\xde\xd1\x17\xda\x05\x2a\x7a\x3f\xa1\x71\xb8\
xa2\x9a\xac\x90\x52\x12\x46\x81\x74\xbe\x5f\xed\xed\x69\x73\xac\x02\x70\xe1\xf2\x3b\x6d\x9c\x39\x5c\x5c\x0e\x7f\x3f\xbc\x3a\xb9\x19\x5e\xfc\xfe\xf3\xcd\xaf\xe7\xe7\xbf\x9e\x9e\xdc\x1c\x1e\x1d\x9d\x8c\x46\x37\xef\xcf\x46\x17\x27\x47\xc3\xd7\xc3\x93\xe3\xba\x52\x80\x00\xc3\xb3\x37\x27\x97\xc3\xab\x9b\xd7\x97\xe7\xef\x6e\x46\xef\x5f\x9d\x9d\x5c\xfd\x71\x7e\xf9\x76\x1f\xce\xdf\x5f\xbd\x3a\x7f\x7f\x76\xbc\x0f\xaf\x86\xc7\xc3\xcb\x93\xa3\xab\xe1\xf9\xd9\xe1\xe9\x06\xb4\x1a\x4c\xe3\xb0\x7e\x33\x6a\x37\xd0\x5a\x10\x15\xe3\xac\x6d\xe3\xa6\xa0\xf6\xa5\xdc\xec\x54\xbe\xc9\xb1\xb2\xfc\xb5\x86\xbc\x39\x7d\xe9\x72\xb1\x93\x2f\xc4\x2b\xcb\xf5\x5c\xa1\x33\x65\x80\xe4\x95\x26\x8f\x51\x11\x87\x4d\x3a\xc1\x7f\x14\x94\xec\x44\x6f\x40\x77\x6a\xab\xf1\x18\x8c\x71\x66\x44\x57\x6a\x92\x37\xa6\xfe\x1c\x95\xf6\xd7\x16\xc6\x66\x08\xa0\x1c\xa7\x06\xf1\x09\x75\x22\xd4\x8a\xcf\xcc\x12\xdb\x32\xd4\xaa\x9f\xc6\xed\x6d\x41\x7c\x5e\xfd\xfb\xe2\x24\x27\x28\xe1\xec\xfc\xe6\xf2\x64\x74\x72\xf9\xfb\xa1\x62\xe7\x7a\xc1\xa9\x9e\xc3\xb3\x7f\xe7\x1a\x80\x85\x75\x94\xfd\x76\x83\xfa\x41\x93\xe4\x04\x2d\xa1\x8a\xe3\x6c\x6c\x90\x9f\x86\xc6\xd7\x0b\xb3\xd0\xf8\x7e\xd9\x24\xd5\x36\xba\xc3\xb5\xc2\x12\x1e\xc5\x3a\x4d\x01\x03\x95\xe2\xea\x88\x71\x8e\x45\xc4\xa8\x9f\xdc\xf5\xa7\xd3\x06\x14\xba\x8d\x94\x53\x20\x63\xa7\xb6\xd5\x2b\x00\x8f\xa0\x95\xa6\xd4\x34\xf3\xb4\x49\x50\x73\x88\x2c\x93\xa6\x66\x9e\x65\xe7\xb1\xb6\x0a\x6d\xfa\x7c\xe9\xa9\x5c\x3a\xcf\x2d\xdb\xa8\x6d\xb6\x9b\x79\x1a\x73\xde\xcc\xb3\x04\xf1\xb7\x4f\x2f\x35\x8f\x9a\x57\xe2\x29\xd5\x97\xc5\xb4\xa2\x48\xfc\x52\x48\x14\x8b\xf1\xe6\xe0\x2f\xaf\x33\x28\xb5\xfa\x3a\xa9\xd6\x6d\xb1\x55\xba\xb5\x02\x57\x63\x87\xbb\x88\x47\x63\x88\x7b\x8c\x7a\x38\x92\xfa\x83\x3e\x4c\x22\x74\xea\x4e\xa4\xc4\xc0\x42\xed\x59\xa8\xe2\x99\xfd\xe2\xc6\x7d\x71\x43\xe8\x8d\x03\x58\x6f\xb7\xef\xa0\x40\x30\xd0\xee\xd9\xdf\xdf\xe9\xe0\x41\xb8\x08\x10\xc5\x40\x7c\x4c\x25\x91\xf3\xe5\x51\x26\x28\xfd\xa9\x17\x71\x42\x3d\x12\xa1\x40\x3c\xbb\x0f\x6f\x0a\x78\x6a\x1c\x6f\x22\xd5\xdf\x8d\xeb\x6f\
xb7\x1e\x5f\x17\x74\x9b\xcc\xb0\xf3\x35\xfc\xfe\x2e\x73\xd8\x22\x99\xb3\x66\x8c\xa1\x54\x0b\xd2\xc4\xa9\x5d\x04\x48\xea\xea\xa7\x16\x47\x61\xbc\xf4\x4c\x16\xfd\xeb\xd7\xd6\x8e\xad\x85\x69\xcf\x1d\xdc\xf5\xec\xed\xc9\x20\xe7\x8e\x31\x63\x58\x5c\x6e\x0b\xb6\x38\x9d\xf5\x33\x57\x93\x96\xe4\x9e\x2f\xec\x04\x1e\xa2\x70\xd0\x9a\xdd\x12\x6f\x00\x0e\x11\xa9\x76\x19\xe5\xc5\xc3\xc8\x63\x51\x9b\xdb\x6f\x9a\x36\x98\x5a\x19\x61\x3a\x59\xd1\x02\x77\xd7\x56\x15\x28\x04\x44\x13\x50\xb0\x87\xcb\xf6\x1a\x1c\x5d\xbf\xb5\x70\xea\x95\xf0\x83\xe9\xaa\xae\x70\x51\xfa\x8c\x91\x30\x97\xf9\x2a\xa4\x34\x12\xa6\x2a\xe0\x03\x9a\x8b\xa4\xb3\x03\xd8\x83\x1a\x27\x1b\x8a\xe5\xcc\x92\x76\x2c\x30\x77\x74\xdb\xe7\x18\xf9\x8d\xe7\xf5\xcd\x90\x7d\x7c\x2f\x24\xe3\x68\x8a\x35\xc4\x9b\x07\x4e\x1a\xd8\xb1\x19\xa6\xbd\xf0\xaa\xaf\x61\x19\xce\xaf\x3f\xd1\x33\x53\xa3\x8b\x95\xe6\xe5\x43\x5a\xa6\xcf\x5d\x64\xa7\xdf\x52\x72\xb6\x16\xa2\x2b\x55\xda\x66\x6e\xc7\x64\x6a\x2a\xe5\xef\xd5\xc7\x67\x34\x83\xd1\x1c\xde\x47\x7e\x48\x68\x5f\x7f\x5e\x17\x48\x7d\x60\xbe\xd4\x5a\x4e\xe2\x20\xb8\xf1\x18\x95\x9c\x05\x1b\x3b\xaa\x5a\x42\x47\x5a\x56\x3f\x6a\xa1\x1b\xb5\xd6\x8b\x96\xd4\x89\x92\xf3\xd1\x0d\xe8\x43\x09\xec\xf5\x1c\xc5\x67\xe2\x6f\x1a\x62\x0b\xc7\x78\xa5\xd3\xf8\x5a\x98\x3a\x4b\x2a\x77\x14\xdf\x78\x36\x5c\x9d\x13\xe1\x9e\x4d\x9e\x0d\xc7\xa2\x87\x91\x90\x2f\xb2\x67\xe0\xea\xf3\xf3\x47\x1e\x13\xaf\x0e\x57\xbf\xf4\x04\x8f\x92\x5b\x11\x6a\xfb\x33\x05\x89\xaa\xee\xee\xb7\xbf\xaf\xa2\x38\x5c\xa1\x9a\x30\xcb\xe6\x40\x24\x85\x93\x0d\x3e\x5a\x08\x30\xaa\xd7\xf2\xb5\x82\x7f\x85\xf4\x1e\x97\xb6\x69\xab\x87\x6a\xeb\xb7\xc7\x78\x4f\xa2\x69\xcf\xcd\xbc\x78\xa6\xd0\xd9\x40\x48\xd1\x3a\x44\xb5\xa8\xbd\x1d\xe9\xeb\x49\xea\xaa\xb2\x0b\x4b\x76\xdb\xae\x0c\x83\x7b\xea\xa4\x73\xa6\x1e\x83\x5c\x20\xb8\x36\x01\x01\xc9\x21\xaf\xce\xde\xc3\x7e\x1f\xce\x75\x70\xb0\x3e\xf2\xd5\x47\xfb\x58\xc8\x7d\x20\xba\x1a\x80\x3b\xe9\xaf\x37\x54\x4c\xa4\xec\x96\x89\x64\xd9\x72\x39\x76\x4a\x29\x63\xa6\x4c\x89\x89\x3f\xa6\x8c\xf6\xcc\x3b\xcb\xe4\xea\xed\xdb\
x02\x0e\x46\xdd\xd6\xc8\x47\xc4\xbb\x03\x64\xa6\xc2\x85\xe9\x26\x2e\xa0\xa6\xf5\x2f\xb0\xa8\x43\x56\x4f\xc2\x14\xcb\x74\x06\xcc\x4c\xab\xfe\x8c\x32\xdd\xb4\xf7\xd9\xcb\x3c\xda\xee\x50\x6a\x17\x6a\x72\xb8\x6d\xe0\xa0\xfc\x5a\xfd\xb3\x6a\xc2\x67\x0e\x84\x7a\x2d\x7b\x20\x3e\x59\xef\x26\x43\x28\x51\x33\x47\xfe\x32\x47\x02\x3a\x80\xb8\xe1\xf6\xab\x3a\xe1\xbe\x10\x34\x5e\x02\xbd\xa2\x69\x15\x3b\x1e\x99\x9b\xaa\xb2\xd7\xdd\x01\xa3\xa6\x70\x8f\x8e\x7c\x40\x93\xba\x78\x64\x1b\xcd\x4f\x84\x52\x92\xa2\x00\x6b\x5e\xcc\x06\x89\x1b\x98\xba\x44\xb0\x56\x59\x78\x4c\x75\x21\x21\x54\x1b\xe6\xec\x42\xa2\xd5\xc6\x8a\x4d\x08\x86\x30\xb7\xf0\x79\x88\x82\xc4\x42\x6a\x56\xf4\x75\xb9\xf4\x5b\xce\x02\x7c\x9b\x04\xb4\x56\x6f\xae\x4c\xf7\xae\x2f\xb9\x73\x48\x99\x5a\x6b\x06\x1b\xd0\x95\xd6\x93\x0e\xf5\xfd\x52\x62\xc6\x1e\xaa\xc3\xf8\xc7\x38\x60\x0f\xb6\x58\xfa\xad\x17\xf3\xe0\x16\x76\xe6\x16\x49\xed\xe2\x8a\x05\x86\xdb\x87\x29\x96\xb7\xbb\x07\x70\x79\x7e\x7a\xf2\xcb\xff\xec\xa8\xf7\x2a\x21\xf6\xde\x24\xb1\xc3\xbd\xd7\x01\xba\x67\xfc\xc0\x38\x93\xb4\x4d\x73\x30\x18\xb8\x51\x3a\x56\x71\x6f\x2b\x96\x71\x5b\xec\x00\x49\xc9\xc9\x38\x96\x58\xa4\x3e\x31\x35\x49\xd5\xbe\x1a\x32\x81\xeb\x6b\xd8\xfa\x9f\x4f\x0a\xc9\xcf\x5b\xf0\xcb\x2f\xb0\xbd\xfd\x4e\x4f\xcb\xf6\x36\x7c\xf8\xf0\x4f\x25\xa0\x28\xf4\xfb\x7d\x37\x59\x8d\xa9\x5a\xc8\xd0\xa3\x6e\x83\x03\x81\xf5\x07\x3b\xbf\xae\x71\xee\xa5\x09\xa9\xe2\xbd\x15\x76\xf4\xa5\x76\xf3\x65\x8c\xae\x86\x6d\xbc\xd5\x91\xf5\x63\x8e\x1c\x97\x61\xfa\xb6\x27\xb5\x29\x3f\xbc\x26\x41\xc3\xf9\xec\xf2\x3a\x8a\xcd\x1f\xcc\xf5\x51\xdb\xb2\xa2\x56\x88\x71\x99\x8e\x8c\xe1\xee\xc2\xd4\x52\xd4\x1b\x5c\xfe\x13\x12\x34\x1c\x86\xac\x1a\xb7\xe1\xae\xee\xbe\x22\x21\x66\x71\xed\xe9\xc0\x63\xa7\x2f\xed\xa5\xfd\x04\x66\x4a\x7e\x84\xda\xcb\xc7\x26\xb6\x38\x5b\x2a\x05\xf5\xed\x40\xf5\x86\x31\x24\x12\x3e\xc9\xaa\x54\x62\xff\xc5\x73\x08\x09\x55\xa2\xc6\xe8\xfb\xbf\x8d\xce\xcf\x80\x63\xab\x47\x34\x57\x93\xd0\x07\x57\xd7\xc7\x36\xc5\x36\xb5\x0d\x7c\x7c\x8f\x03\x45\xbb\x22\x6b\x20\xe8\x4b\x3a\x3c\
x16\xf4\xc6\xf1\x64\x82\xb9\x30\x96\x82\xfe\xf6\xc7\x67\x7f\x0a\x46\xeb\x2d\x03\xd0\x8e\x77\x5b\x6d\x46\xe9\x8a\x3a\xab\x17\x91\x40\xd8\xfa\x3f\x6a\x6b\x88\x02\x44\x91\x64\x7c\x0e\x98\x73\xc6\x21\xc4\x42\xa0\x69\x13\x79\xed\xc8\x59\xfe\x42\xa9\xcc\xec\x9a\x9a\x7b\xc8\x94\x9d\x55\x3f\x69\xb8\x88\xd6\x6b\xa4\x60\xc1\x78\x18\xfb\xaa\xa1\x59\x77\x88\x30\x27\xcc\xdf\x35\xd5\x54\x72\xdd\x10\xd1\x70\x41\x2d\x64\x16\x51\xd7\xe9\xc3\xd4\x4f\xee\xbf\xca\x81\x5f\x33\xa3\x04\x64\x82\xbd\xb9\x17\xe0\x75\xe7\x41\x9e\xe6\x01\x37\x35\x5a\x41\xdc\xb6\xeb\xa1\x82\xe1\x92\xc6\x2e\xdd\xf1\x2b\x97\x70\x40\xb1\x64\xc7\x58\x2d\xbf\x92\x22\x6b\x30\x09\xdd\x1d\x62\x3e\x92\xb8\xa7\x48\xa8\xbd\xf9\x78\x98\xc3\x65\xb5\x23\x4f\x2d\xc6\x74\x45\xfa\xa2\x5d\xa8\x46\xda\xf3\x35\xf8\x7a\x2e\xab\x12\x5a\x5a\x2c\x29\xcc\x84\x44\x61\xb4\x61\xb9\xb4\xd2\xce\x93\x59\x4c\x19\xac\xdf\xbc\x3f\xcc\x82\x5f\x6d\x79\x14\xdb\xeb\x25\xf2\x5d\x05\x05\x36\x69\x17\xe3\x79\x55\x12\xcf\x9d\x5d\x52\x57\x64\x34\x11\x60\x44\xb4\x92\x5c\xef\x08\xd5\xa5\xff\x4d\xed\xee\xdc\xf6\xf5\xcf\xe4\x5e\x80\xf4\xc7\x9f\xc0\x6f\xb2\x99\x6b\xe9\xe7\x0b\x6d\x6b\x2b\x91\x0f\xf1\x03\xbc\x41\xf2\x19\x66\xc1\xaf\x46\x3e\x09\xd5\x48\x06\x77\x18\x47\xad\x53\x99\x51\x40\xee\x95\x5c\x20\x6a\x53\xf4\x75\xf5\xac\x1d\x2d\x25\x28\xb3\x57\xb4\x1a\x43\x94\x12\x3a\xdd\xed\xc3\x05\x12\xa2\x49\x1b\x33\x77\xc3\xce\x38\x16\x33\x16\x98\x24\x65\xb3\xa3\xe7\x52\x0f\xcc\xe1\xa8\xa5\xd1\xbe\xa3\xb6\x26\x07\x89\x21\xb6\x7f\x34\x13\x62\x4a\x6c\x0d\x2e\xa7\x6f\x8e\x10\x47\x12\x71\xf9\xf5\xf7\xa4\x61\x16\x95\x9a\x66\x6b\xaa\x7f\xb1\xb8\x7d\x8d\xb1\xa7\xaf\x78\xf1\x1b\xcc\x99\x1d\x7d\x31\x1e\xc7\x1e\xa6\x52\xdf\x81\x3d\x21\x94\x88\x19\xf6\x77\xb5\xeb\xc4\x82\xc1\x01\x99\xea\x50\x4f\xa5\x75\x68\xaa\x6c\xd2\xc6\xfd\x58\x97\x4b\x52\xfd\x53\x2c\xaa\x55\xfa\xa7\xbd\x3b\x1a\x0f\xc5\xba\x75\xcd\x77\x68\xa3\xf5\x36\x5a\x80\xaf\x2b\x7b\x55\x5b\x64\xa3\xb6\xf6\x95\xd4\x95\x58\x34\xf9\x25\xc9\xe1\x1b\x2a\xb5\xb1\x74\xf2\x7f\x2b\xa5\xd6\xf3\x70\x80\xb9\x32\xd9\xd6\x7f\x76\x76\
x98\x01\xbe\xda\x2e\x56\x58\x99\x0c\xb6\xf9\xea\x52\xf5\xe7\x87\xc6\x6c\x10\xd9\xf0\x9a\xcd\x9d\x80\x2d\x51\x9b\xe1\x29\x45\x38\x34\xe6\x89\xac\x8f\x41\x5b\x92\x45\xfb\x4c\x89\x1c\x19\x1f\x35\x05\x99\x66\x47\x4d\xa8\xc4\xd3\x06\x85\x08\x32\xdb\x22\xa1\xf2\xe7\x9f\x1a\xdf\xae\x66\x83\xa3\xc6\x90\x39\x28\x3b\x54\x36\xe5\x99\x9d\xb7\x21\xc7\x05\x88\xfb\xa2\xfc\x3a\xd7\x85\x11\x6b\x2d\x6c\x1e\x61\xc0\x1f\x23\x26\x32\x17\x50\x25\x25\x5c\x5a\x8c\x6c\xe5\x80\xe7\x0c\xd6\x6d\xf2\x58\x56\x8d\x48\x5e\x98\xf2\x16\x49\x2c\xc5\x03\x9e\xd7\x8f\x0d\x2f\x31\x4f\xc9\x72\xe9\xe9\x4f\xea\xc9\xea\x53\x23\xb5\x14\x6e\x25\x5a\x00\x4d\xcb\xed\x8c\x70\x1a\xd9\xea\x04\x64\x61\xdc\xad\xc3\x05\xd2\x2c\xe2\x31\x96\x68\x50\x58\x2b\xd1\xe8\x17\x54\xcf\xb2\xc7\x94\xba\xa7\x86\x5c\x5b\x1d\xf0\xd2\x5b\x44\x68\x40\xef\x89\x4f\x50\x4f\x62\x11\xa0\xde\xdd\xff\x36\xc4\xc0\x98\xa7\x39\xb5\x77\xdd\xdd\x2d\x34\x83\xbd\xbd\xc3\x58\x32\x7d\xfc\x0f\x27\x1f\x3d\xac\x49\x6e\x6f\xef\xa0\xea\xba\x83\xfc\x33\x67\xb1\x36\xbf\xd2\x1b\x86\x93\xa0\xea\xeb\x14\x6e\x22\x88\xd7\x14\xbb\xae\x7d\x07\x6a\x8e\x9e\xe9\x7e\x6f\xd4\xdf\x37\xea\xef\x9b\xe4\xa6\x9b\xe6\x4c\x73\x80\x09\x46\x32\xe6\x78\x5f\x8f\x42\x57\x5e\x72\x16\x60\xe6\x48\x7c\x91\x67\xda\x48\xb6\x2c\x57\xed\xeb\xfd\x3e\xb9\xff\x73\x71\x0d\x6a\x4b\xff\x9a\x67\x25\x31\xe7\x13\x71\x57\xaf\x36\xc3\xca\x59\x99\xc7\x09\xec\x36\x6d\x1e\xb9\x3f\xb7\xea\xac\x42\x69\x53\x6d\x81\x99\x02\xb0\x05\x3d\xfa\xa9\xe8\x5c\x6d\xb5\x8a\x31\x63\x52\x0d\x67\x44\xfe\xc2\xbf\x8e\xdb\xa5\x3d\xb5\xd3\x27\x96\xd1\x26\x0a\x84\xf0\x2a\x87\x54\x43\xdb\x8a\x35\x52\x6d\x95\x91\xf2\xeb\x2b\xc7\x6e\x6a\xa8\xa6\x7e\xef\x8e\x9d\xc5\x46\x16\x21\x02\xfe\xf1\xfc\xf9\xaf\xaf\x1a\xb7\x86\x95\x55\x06\x37\xff\x9b\xcd\x7a\x7d\x95\xe9\x65\x99\xe9\x2c\xc9\x29\x5d\x7d\x26\xb7\x22\xbf\xa7\xb6\x75\x1f\x71\x7f\x6b\xd7\x5e\x1c\x9a\xe4\x98\xaa\x5f\xc7\x28\x50\xbb\xbe\xbf\x05\x3b\x17\x98\x0b\x22\x24\x6e\xa1\x48\x6a\x6e\x7c\x65\x9b\xc2\x88\x29\xa8\x23\x89\x24\x86\x63\x4e\xee\xf1\xee\xbe\xe9\x59\xac\x02\xb6\x0c\x1a\
xe3\xf9\xa1\x2c\x0f\xf5\x0d\xe2\xbe\xf9\x64\x60\x5a\x35\x47\x7f\x23\x97\xd1\x68\xd4\x1a\x88\x67\xea\xff\x5a\x1c\x8a\xdd\x46\xb1\xbf\x32\xa1\xd2\x38\x3c\x65\x1e\x0a\x46\xc2\x6f\x99\x1d\xb9\x71\x31\x71\x96\x41\x69\x35\x21\x71\x96\x98\x1c\x48\x4a\xe4\xcd\x14\xfd\x8c\x8e\xc5\xbe\xae\x15\xdc\xb8\x96\xcf\x95\x46\xfb\x53\xc2\x03\x8a\xc6\x9f\xef\xea\x78\x40\x05\x24\xa9\x6b\xe9\x40\xef\x37\xde\xc8\x00\x56\x20\x18\xde\x22\xe9\x35\xa4\x42\x32\xe3\x8f\xd6\xfe\xc0\x80\x4d\xcd\x65\xe2\xd7\x6f\x8e\x5f\x8f\x52\x5a\x31\x17\xa6\xf7\x51\xa4\xba\xeb\x33\x3e\xb5\x6a\xef\x8b\xfe\xcb\xfe\x8b\xc1\xcc\x9f\x88\x9b\x58\x60\x7e\x33\x8d\x89\x8f\xfb\x33\x19\x06\xcd\x1a\x8d\xb9\xca\x6d\x38\x01\x7d\x01\x95\xad\x62\x96\x0c\x2f\x3b\x34\x22\x1c\x82\xcd\x63\x8c\x83\x3b\x0d\xd9\x94\x89\xe3\x18\xf9\x80\x3c\xce\x84\xbe\x9d\x3a\x34\xf5\x2e\x73\x82\xa6\x11\xa4\xbd\x02\x48\xd8\x72\xbf\x48\x10\xcf\x6d\xcd\x49\x4d\xc3\x20\xc0\x3e\x8c\x09\x45\x9c\xd4\x3b\x47\x60\x3d\x59\xaf\x4b\x6c\xd8\x24\x44\xd3\x0d\x04\xe0\x0e\xc3\xfa\xb0\x84\x25\xf2\x23\x34\x86\x89\xfe\x59\x3b\x03\x49\x76\x84\xf3\x59\x17\x12\xbe\xde\x5f\x0e\x75\xfc\x5b\xe2\x30\x06\x44\x0d\xfc\x5a\xb0\x8c\x5b\x24\x26\x28\x24\xc1\xbc\x6f\x46\xe7\x94\xe0\x75\x58\x64\xb6\x46\x9d\xee\x45\x0c\xae\xf5\xbf\xbd\xd5\xef\x0d\xa9\x04\xa7\x9a\xb8\xbf\xea\x61\x0f\x33\x03\x4e\x46\xda\x4f\x0d\x22\x7d\xca\xe4\x2c\x8c\x8c\xc7\xbf\x16\xa8\x9d\x45\xce\x42\x93\x18\xa6\xa1\xaf\x7d\xfa\x0c\xd8\xc1\xb5\xa7\xcb\x32\xf6\xcc\x78\xcd\x97\x9a\x3a\xd7\x33\xab\x8f\xec\x65\x68\x14\x1a\x45\x91\xe6\x56\xa1\x34\x67\x8e\xc8\xe4\x5c\x99\xd0\x09\xe6\x5c\x11\xb5\x9a\xb3\xdb\x11\x9b\xc8\x07\xc4\x6d\xbc\x47\x5f\xf7\x79\x73\xaf\x74\x00\x46\xeb\xbb\xb3\xa1\x1d\x62\x2e\x24\x0e\x5d\x04\xec\x53\xba\xa5\xc5\x25\xad\x34\x09\x8e\xf6\xf9\x2a\x8e\xf7\xcf\x50\xb8\x81\x8c\xd7\x61\x16\xfa\xe6\x8f\xe3\xdc\x8d\xcb\xc9\x29\x88\xbe\x2c\xa9\xdf\x2e\xce\xdf\xc7\x4a\xd9\xd3\x5b\x9c\xbd\x64\xc9\x90\x93\x95\x92\x37\xea\xbb\x5b\x65\xcb\xc7\x61\x52\x22\x57\xdc\xd6\xd7\x56\x71\x1b\x65\x82\x90\xae\xb5\xf6\x54\x8c\xd0\x6f\
x27\x53\x11\x96\xae\x21\x57\xca\x35\x76\x15\x9a\x54\xde\xb6\xac\x23\x2e\x38\xc6\x61\x24\xc9\xb8\x3e\xe4\x76\xd5\x8a\xf2\x59\xf8\x9b\x65\x9e\x91\x95\xab\xc2\x15\x63\xcc\x38\xe4\x0d\xcd\xd6\xce\x59\xa2\xdd\x45\x29\xc2\x1b\x3c\xe7\xb2\x59\x9d\x4d\x36\xf9\x8a\x85\xbf\x53\xe0\xeb\xd1\xcc\x5c\x0e\xaa\x6c\x32\xee\xeb\xf4\xb2\x9a\x64\x9e\x5a\x98\xf9\x44\x9f\xcd\x55\xbd\x74\x4e\xeb\xcc\xd2\x88\x01\x7d\x91\x58\xe1\xbd\x97\x8f\xca\xf5\x59\x4f\x0f\xf9\xb7\xeb\xdc\xe0\xce\xc9\xdd\xb0\x5e\xdf\x94\x03\x7c\x19\xd7\x77\x6b\x9a\xad\x74\x78\xe7\xa6\x7a\xfd\x2a\x94\xbd\x1f\x58\xd7\x03\xdd\x94\xbf\xfb\xdd\x42\x1f\x5f\xc0\xef\xbd\x54\xa7\x6b\x52\x9b\x32\x97\x87\x2e\x91\xc8\xe9\x76\x53\x5b\x1d\xda\x20\x6e\x6f\x70\x36\xcb\x23\xcc\x46\x62\x74\x1e\x73\x33\x47\xc3\xf1\xa2\xf6\x0e\x24\x72\xb0\x6c\x37\x31\xd0\xbe\x39\x3f\xbe\x43\x5f\xcf\x95\x9d\xaa\xa6\x3b\xf8\xe0\x51\x3e\xe5\x61\x45\x8f\x0d\x50\x5a\x93\x14\x34\x92\x55\x56\xa0\xe4\x89\xa5\xd1\x5d\xe4\x88\xc9\xdd\xd7\xd6\xac\x37\xc3\x63\x7c\x42\x6e\x75\x1c\xb7\x7e\x99\x95\xc9\xf6\xf6\x95\x57\xc5\xa1\xd2\xb8\x30\x09\x6f\xaa\xe6\x56\x56\x15\x16\x77\x23\xab\x14\x12\x7a\x14\xc5\xae\xd8\xd6\x06\xb4\xbe\x1c\xfc\xe5\x8f\x1c\xb3\x0a\x34\x56\xd8\xea\xf0\x5f\x2f\x8a\x21\x6a\x86\x99\x44\x87\x15\x67\xd2\x9e\x3b\x38\xed\xa2\xf7\xff\x92\xf0\xf7\xa3\x8b\xf7\x49\xe9\xb1\x55\x14\x0c\x7b\x48\xe1\x52\x4b\x43\x42\x7b\x5e\x14\x6f\xb2\x28\xc3\x12\x62\x95\xc6\xa1\x9b\x88\x16\x2e\x89\xe6\x73\x8c\xb6\x67\x18\x8b\xe7\x17\xc3\x16\x65\x32\x6a\x22\x07\xd3\x90\xa9\x5c\x99\x39\x53\x2c\xa0\x95\x44\x72\x9b\xe7\x6b\xc6\xe1\xfa\xcd\xa1\x33\x0d\x3e\xec\x2c\xa5\x3b\xce\xc8\x74\xd6\x43\xf7\x88\x04\x68\x4c\x02\x22\xe7\xf5\x9a\xe2\xb5\x09\x26\xbd\x31\xb0\x3e\xec\x3c\x7b\x3d\x3c\x39\x3d\x1e\xf5\x73\x5f\xef\xda\x9d\x78\x1f\xf6\xf6\x2a\x6f\xfe\x77\xcf\xd8\xd4\x06\x93\x0c\x7e\xdc\xdb\x33\xa3\x71\xea\x61\x62\xed\xb4\xeb\xb5\xb6\x9b\x3c\x46\x99\x5e\x5f\xec\xed\xad\x9d\xb4\x13\xed\x44\xcf\xe8\x26\x8a\xc6\x67\xe1\x7f\x01\xd5\x33\xdf\xe1\x8a\xe5\xe1\xf3\x82\x30\x3f\x47\x4d\x21\x82\x25\x9e\x32\
x93\x46\x62\x0f\xf0\x4c\x36\x85\xa9\xeb\xa4\x49\x46\xdf\x36\xa7\xf3\xd1\x5b\x90\x05\x10\x01\xb7\x67\xe7\x67\x37\x17\x97\x27\x27\xef\x2e\xae\x86\xaf\x4e\x4f\x6e\xad\x5a\xea\x7a\xf0\x92\xba\x50\xde\x0c\xd1\x69\x7d\xd9\x43\xa8\x40\x4e\x60\x8f\x29\xd2\x9e\x67\x59\x5e\xc0\x6d\xae\xdf\x5a\xb8\x25\x05\xf2\x6d\xd3\xe1\xe9\xf0\xea\xdf\xc5\x5a\xcf\xb9\x21\xd5\x3b\x24\x33\x2f\x7e\xa5\x02\xf8\x55\x03\xa9\x6d\x56\x18\x63\xbb\x2e\x2a\xde\x4b\x96\xe7\x0f\x4d\x39\xeb\x4e\x32\x18\x95\x81\x6f\x6a\xba\x02\xe7\x2e\xd3\xcf\x66\xd2\x0e\xd2\xab\xf8\x5c\x51\x88\x8d\xdf\xf4\xd9\xa5\x1f\x74\xe9\x07\xdf\x6c\xfa\x41\x29\xc7\x76\x79\x08\x0d\x4f\x97\x87\x00\x5d\x1e\x42\xc5\xd3\xe5\x21\x74\x79\x08\xdf\xda\x31\x8c\x79\xba\x3c\x04\x78\xba\x79\x08\xa5\x1b\x75\x97\x90\xa0\x9f\x2e\x21\xa1\xee\xe9\x12\x12\xba\x84\x84\x2e\x21\x41\x3f\x5d\x42\xc2\xc2\xd3\x25\x24\x74\x09\x09\xe5\x4d\xbb\x84\x84\x2e\x21\xa1\x4b\x48\xe8\x12\x12\x8a\x4f\x97\x90\x50\x0d\xb0\x4b\x48\xe8\x12\x12\xba\x84\x84\xec\xd3\x25\x24\xb8\xa7\x4b\x48\xe8\x12\x12\xba\x84\x84\x27\xee\x09\xef\x12\x12\x9e\x6a\x42\x42\xa9\x03\xbc\xcb\x4c\xe8\x32\x13\x32\x4f\x97\x99\xd0\x65\x26\x74\x99\x09\x5d\x66\x42\x97\x99\x50\xf2\x74\x99\x09\x5d\x66\x42\xee\xe9\x32\x13\xd6\xa2\x83\x76\x29\x0a\x5d\x8a\x82\x7d\xbe\xf3\x14\x85\x98\x13\x39\xdf\x40\x6e\x42\x06\x6e\x53\x9b\xd5\x98\xb6\xb9\x83\x2a\x45\xc5\xb6\xcd\x25\x1f\x7c\xc5\xab\xb6\xee\x30\x1f\x63\xce\xc4\xa6\xac\xf9\xb7\x39\xf8\x9b\x93\xa2\x99\x05\x69\xdd\x65\xc5\x12\xb9\xf6\xc0\xb1\xea\xd9\xcf\x27\x20\x6c\x40\x45\x6b\x6b\xc4\xea\x58\x80\x4b\x8c\x82\xf0\x8a\xc7\x42\x1e\xfa\x21\xa1\x23\xad\xd8\x6d\xce\x58\x3a\xaa\xec\x73\x29\x2b\x28\xa7\x96\x21\x05\x04\x8c\x4a\x0a\x3b\xc3\x0b\x60\x1c\x66\x4c\x48\xd5\x63\x73\xec\x85\xe3\x17\x8e\x43\x26\xb1\x9a\x5b\xa1\x16\x89\x2b\x1c\x6d\x46\x8e\x8e\x99\x30\x5f\xe8\x9f\x1b\x61\xea\x95\x26\x8c\x8a\x19\xd9\x9c\x95\x5b\x58\xbe\xb7\xbe\xf7\xc5\x96\xed\xad\x5f\x7f\x08\x57\xbb\x5c\x6f\x8f\x8f\x16\x56\xa9\x36\x65\x2a\x41\x7f\xb9\x55\xfa\x2a\xab\xa0\x3f\x7d\xb1\x75\xd0\x9f\x56\x5f\x09\
x3b\x99\x76\xce\x32\x1e\xf0\xc6\xa5\x60\xd4\x59\x23\x7a\x39\xf5\x11\xbe\x9e\xf4\x7d\x10\x33\x16\x07\xe6\xbc\x34\x16\x98\x03\xa6\x2d\x6e\xa9\x85\xc5\xe5\xfb\x52\x2b\x36\x9a\x21\x8e\xfd\x0b\x24\xc4\x03\xe3\xfe\x17\x5b\xba\x7c\xb7\xab\xaf\x61\xe9\xed\xc0\x08\xde\xbe\x1b\x35\x4e\x39\xa6\x1e\x9f\x47\x8a\x8d\x26\x24\xc0\xee\xf8\xcf\x9d\x86\x08\x8d\x20\x44\x16\x43\x18\x63\xf9\x80\x71\xd3\xad\xb6\x86\x4b\xb3\xe4\x91\xee\x7e\x6a\x69\xdd\x51\x7a\x9e\x8f\x5b\x08\x54\x14\x84\xfb\x5f\x95\xd1\x0d\x1d\xbb\xe1\xb4\xa3\x93\xe6\xc3\x62\x58\x24\x94\x93\x5c\x47\xcb\x50\x46\xc6\x5c\x7b\x1d\xa0\xa9\xbe\xd2\x8c\xfa\xc4\x43\x52\xdf\xb7\x26\x67\xfa\xae\xbe\xc6\xc9\x36\x5d\x93\xbf\xf2\x97\xfc\xed\x24\xce\x9e\x09\x0a\x84\x09\xa7\xb5\x67\xcc\xfa\xa8\xbd\x99\x30\x98\x9e\x64\x9d\xb3\xa4\x87\x98\xd2\x06\xcb\x26\xbe\x6e\x2c\xb8\xf6\xce\xf7\x8e\xc7\x6f\x71\xad\x6d\x0f\x8f\xe2\xf1\xb7\xb6\x87\x27\xc8\xce\xd6\xb2\xbe\xc3\x73\xe7\x2d\x7e\x7b\x7c\xd4\x08\xd3\x47\x12\x8d\x91\x68\xcc\xeb\x5b\x7d\x4d\xf0\x7c\xf3\xa2\xf7\x6d\xda\xc9\x13\x5c\x99\x44\xc2\xea\x14\x3e\xb3\x6b\x36\x82\x8c\x38\xbb\x27\x3e\xf6\xd5\x04\x1a\x07\x9c\x96\xd9\x38\x98\xf4\x04\x99\x52\x65\x65\x28\x13\x60\xa2\x99\x7f\xbf\x5d\x6a\x60\x82\x08\x11\x30\xc5\x14\x73\x6d\xad\x8c\xe7\x89\x5a\xb0\x49\x2a\xd0\xc1\xd7\x1b\x25\x01\xdd\xc3\x9a\xd7\xbf\x4d\xac\xb9\x1b\x9c\x59\xfc\xe4\xc4\x65\x34\x3a\x75\x84\xa1\xcc\x40\x18\x4e\x74\x1c\xbb\x5b\xd8\x7a\x77\x8f\x7a\xf2\xe1\xa9\xb6\x1d\xa0\x2a\x22\xd8\xf8\xe2\x7d\x11\x3e\xce\xf5\xf4\x3d\x32\xb3\x1e\x60\x33\x47\xb7\x50\xc3\x88\xf8\x9a\x1c\x1d\x8a\xcd\xee\xb4\x1a\xfe\xea\x04\x10\x73\x92\xec\x83\xef\x46\x7a\x5b\xb4\x69\x20\x6d\x09\x00\xee\x11\x27\x2c\x16\x20\x30\x15\x44\x92\x7b\xc3\xe0\x9b\x4f\x79\x68\x1b\x3b\x59\x88\x9e\x54\x14\x7f\x17\x8a\xc1\x91\x42\x9e\x35\x4f\x5e\xfb\x08\x4a\x70\x0a\xfa\xe6\x96\xfb\x91\x86\x6e\xf6\x90\xbc\xc4\x36\x69\x67\x7e\x24\x22\x3a\x13\xbe\xae\x59\x3c\x8a\x30\xf7\x90\xa2\x1e\x9f\x85\x88\xd0\x36\xc5\x14\x9c\xe7\x43\x24\xe1\xef\xc6\x2a\x52\xfd\x6c\x8a\x80\x38\x63\xf2\x82\x13\xea\x91\
x08\x05\x9b\x97\xd6\x97\x65\xdd\x3d\x41\x91\xad\xa6\x05\x22\x87\x68\x22\x33\x37\xb6\x0c\x72\x2a\x4f\xc9\x04\x4b\x12\xe2\x37\x2c\xae\xaf\x9e\x03\x5f\x2e\xcf\xef\xaa\x80\xd6\xea\x2b\x15\x58\x30\x8e\xe3\x24\xf1\xee\xb0\x84\x29\x47\x54\xb6\xa9\xc4\x61\xde\xd7\xd6\xfe\x4c\x61\x52\xc6\x78\x8c\x1b\xef\x92\xfb\xaa\x99\x87\x9f\x6b\x6e\xa5\x85\x73\xbd\x17\xcf\x13\x06\x54\xf2\x7f\x73\x8b\xce\x63\x21\x37\xac\xe3\x5e\x25\x7d\x7c\x05\x2d\x37\x1d\xe0\xf7\xaa\xe7\xa6\x23\xdc\xbc\xec\xbc\x5a\xe8\xeb\x09\x0a\xce\xc7\xe9\xba\xe9\x74\x7e\xbb\xda\xae\xc8\xe5\xa8\xad\xf1\xc8\x39\x07\x77\x13\x47\xce\x6d\x3a\xa8\xa1\xa9\x92\x8a\x77\xc9\x64\x00\xa1\x82\xf8\xd5\xbc\xf4\xf5\x0e\xa7\x75\x0e\xe1\xef\x26\x85\x70\x43\x69\xc0\x16\xfa\xf2\xa7\xc4\x6a\x56\x6d\x76\xa3\xe2\xcf\xc2\x64\x36\x8a\x60\x37\xa1\x30\x94\xe0\x82\xa5\x74\x32\xb9\xd9\x82\x45\x1c\x45\x8c\x2b\x76\x48\xc3\x0f\x2d\xaa\x75\x65\x10\x2a\x82\xd2\x2c\x9e\x84\x4e\xd3\x90\x43\xfb\x9d\x78\x96\x74\x75\xe3\x7e\x73\x59\x9b\x62\xb7\x9e\x91\x45\xec\xcd\x00\x09\xd8\x7a\xd1\x7f\xb9\x05\x3b\x84\x7a\x41\xec\x2b\x61\x83\x40\xc4\xe3\x90\x50\xc6\xdd\x14\xed\xe7\x5e\xee\xbf\xfc\xbf\xad\x06\xd8\x56\xc0\x5c\x6f\x45\x1c\xdf\x13\xfc\xb0\xe5\x20\xad\x79\xf4\x4c\xce\x30\x4f\x07\x5c\x2f\x71\x86\x93\x85\x64\x58\xab\x9d\x08\x27\x53\x15\x27\x0b\x09\xc7\x78\x4c\x10\x75\x38\xaf\x3d\x66\x80\x59\x32\x3c\x62\x61\xc4\x28\xa6\x72\xfd\x39\xa4\xe7\x0b\x5d\xac\xc6\x23\x02\xeb\xdc\x50\x2f\x01\xa3\x66\x0a\x79\x92\xdc\x37\xc5\x3b\x33\xda\x46\xf4\xc0\xe3\x43\x63\xbf\x7e\x4d\xc6\xe5\x33\x37\x1f\xb1\x7b\x2c\xae\x6c\x43\x88\x61\x73\x3c\x19\x40\x0f\x8e\xce\xdf\x5d\x9c\x9f\x9d\x9c\x5d\xb5\x0c\x26\x53\x6d\x0e\xcf\x0e\x8f\xce\xcf\x8e\x0f\x1b\x5e\x3b\x3e\x3f\x7a\x7b\x72\xd9\xf4\xd2\xe5\xfb\x61\x53\x7f\xaf\x4f\x87\x67\x6f\x1b\xde\x79\xf3\xea\x70\x54\x17\xd8\xa6\xdf\x19\xfe\x7e\x72\xf3\xc7\xc9\xab\x37\x47\x87\x57\x0d\xaf\xfe\xf6\xfe\xe2\xdf\x57\x8d\xc8\xbf\x3d\xb9\x7c\x75\x72\x79\x5e\xaf\xe1\xe9\x90\xba\xd1\xd5\x79\xc3\x4b\x97\x87\x67\xbf\x36\x76\x38\x3a\x3f\x6d\x7a\xe5\x3f\x27\x17\x17\
x27\xa7\xc3\xb3\xa6\xd7\xce\xcf\xdf\x9e\x9c\x5c\xd4\xf4\xd8\x2e\xbe\xa9\x65\x7c\x59\x5a\x8f\xf7\xa2\x65\xd8\xd4\xaa\xe1\xbf\x0e\x7a\x5b\xb9\x57\xc8\x47\x4d\x47\xad\x6b\xd5\x28\x31\x48\xc1\x47\x38\x6c\x28\x62\xeb\xb2\xc1\xb4\x6f\xd2\xa1\x31\xd7\xae\x66\x9d\x4c\x9a\xec\x3f\xca\xde\xbe\x8d\x38\x9e\x90\x8f\x07\xb6\xb3\x79\x7d\x32\xa6\xf1\x36\xe4\x92\x06\xe1\xd6\x53\x36\xae\x2d\x92\x23\xc3\xa8\xef\x13\x7e\x6b\x46\x30\x61\x41\xc0\x1e\x9a\x04\x93\xc6\x29\x51\x57\x0c\x3e\x58\xb8\xc0\x01\xc2\x21\x44\x51\xa4\x94\xce\x03\xd8\x03\x0f\x45\xc8\x23\x72\xde\x13\xde\x0c\xfb\x71\xd0\x14\xba\x76\xbb\xd8\xa0\xff\x31\x0c\x6e\x15\x28\x85\xb7\x41\xbf\x27\x88\xc4\xee\x7b\x9f\x08\xe9\x45\x0d\x60\xcd\x4b\x2e\x1f\xc2\x35\x9d\xf9\x13\x71\x00\xb7\xea\x9f\x1c\xc8\x19\xb9\x57\x5d\xa9\x7f\xd2\xef\x6b\x3b\xd8\x53\xa3\xe6\xd8\x3f\x80\x5b\xf3\x21\x07\x2f\x22\xd3\x03\xb8\x8d\xc8\xb4\x9f\x12\xc9\x2d\xec\xd5\x2b\x5b\x11\xe2\x77\x07\x70\xab\xff\x75\x88\x8b\xbe\xa2\x16\x05\x72\x8e\x38\x3d\x80\x5b\xf5\x4f\xa6\xab\xd7\x0d\x51\x5a\xba\x6e\x11\xa1\x86\x2e\x8c\x9a\xa6\x4b\x8c\x5a\x7f\x6b\x8a\xdd\x0a\x39\x2e\x06\x46\x2f\x85\x51\x5f\x86\x6b\x79\x5b\x4e\xa2\x29\xa1\xd3\x57\xb1\x77\x87\x2b\xab\xff\xb6\xe0\xfd\xa2\x29\x97\x05\xbb\x9c\xa1\x75\x58\x30\xdd\xc7\x1a\x46\xa6\x5c\x55\x5d\xf5\x90\x3f\xd9\x18\x7c\x1c\x61\xea\x63\xea\x11\x2c\xf6\x73\x82\xc0\x14\x7f\xd2\xef\x70\x72\x8f\x75\x51\x6c\xc1\x02\x0c\x4c\xe7\x97\x55\x2b\x47\x36\xa9\xda\x67\x19\x87\xdc\x5c\x29\xe8\x66\x98\x16\xc7\x7d\x8b\x79\xce\x75\x53\x09\xd3\xc7\x12\xf3\x50\x97\xe9\x2e\x8c\x38\x60\x9e\xa6\x23\xd8\x79\x3f\xda\x87\xc3\xd1\xf0\x50\x7b\xfe\x4e\xde\xeb\x80\x41\x85\x49\x35\x3d\x5a\x82\xd9\x16\x05\xdc\x00\x79\x1e\xe3\xda\xae\xb0\x3a\x76\x21\xd3\xff\x2f\x56\x93\x29\xfb\x30\xc3\x1c\xeb\x8e\xd3\xfc\x7e\xa1\x26\x3a\x60\x73\xa5\xc2\x5b\x31\x45\xc1\xe3\x18\x49\xac\xff\x36\xd9\xb3\x35\xf6\x30\x11\x60\x93\xe5\x7b\x01\xbe\xc7\xc1\x3e\x44\x98\xf7\x92\xc1\x5b\xbc\x77\x44\x2e\x7b\xcc\x8e\xaa\x12\xac\x69\xb5\xa6\xb4\x75\xdb\x59\xcf\x00\xdd\xad\xb1\x6b\xf6\xf6\xae\x92\
x90\x20\xe0\xf8\xbf\x31\xe1\x4a\x7a\x97\x93\xb2\x62\x91\x7d\x53\xbe\x0d\xde\x5f\x0e\xab\xa7\x88\x55\x40\xe8\xef\x55\x49\xb9\x95\xeb\xe4\x37\x1f\xaf\x65\x8f\xd5\x2c\x3a\x83\x5a\xfe\x4e\x8e\xd2\x2a\xcb\x3e\x4b\x1c\x46\xeb\x96\x3c\x57\x09\xcc\x35\x8b\x9d\x3a\x07\x33\x8e\x66\x38\xc4\x1c\x05\x09\x7b\x58\x41\x23\x74\x2c\x51\x6a\xb7\x8f\xd4\xbe\xa3\x7f\x7c\x87\xa2\x4b\xec\xc7\x35\x05\x5d\x66\x44\x75\x3a\x77\x3a\x4c\x95\x0c\x52\x93\x98\x08\xa0\xc6\x50\x56\xed\x4d\x5e\x45\xf4\x54\x2f\xb2\x15\x49\x19\xd1\x93\x41\xa9\x49\xee\x54\x82\x55\xf2\xe8\x2b\xca\x9d\x7c\xaa\xd6\xb8\x9e\xce\x67\x48\xf1\xfa\xd5\xd5\xa9\xb2\xd1\xff\xef\x39\xf8\x68\x2e\xf6\x61\x1c\x4b\xbd\x62\x1e\xa2\xba\x5a\x05\xa2\x73\xfd\xce\x0e\xe3\x40\x19\xc5\xbb\x40\xaa\x4f\x4c\x55\xc3\x74\x8d\x1d\x4e\xad\xc4\x4c\x83\x6c\x2c\x8a\x9f\xef\x5f\xcc\x3c\x6c\xe4\xf2\x96\x0d\xdf\xd9\xf2\xd5\xae\x6a\xe9\xee\x67\x29\xa0\xd8\xdd\xcf\xd2\xdd\xcf\x52\xc7\x9d\xdd\xb5\x2c\x0d\x4f\x77\x2d\x0b\x74\xd7\xb2\x54\x3c\xdd\xb5\x2c\xdd\xb5\x2c\xdf\x5a\x31\x3a\xf3\x74\xd7\xb2\xc0\xd3\xbd\x96\xa5\xbb\x8d\x65\xe1\xe9\x6e\x63\xa9\x7b\xba\xdb\x58\xba\xdb\x58\xba\xdb\x58\xf4\xd3\xdd\xc6\xb2\xf0\x74\xb7\xb1\x74\xb7\xb1\x94\x37\xed\x6e\x63\xe9\x6e\x63\xe9\x6e\x63\xe9\x6e\x63\x29\x3e\xdd\x6d\x2c\xd5\x00\xbb\xdb\x58\xba\xdb\x58\xba\xdb\x58\xb2\x4f\x77\x1b\x8b\x7b\xba\xdb\x58\xba\xdb\x58\xba\xdb\x58\x9e\xb8\x03\xbc\xbb\x8d\xe5\xa9\xde\xc6\xd2\x5d\xc2\xe2\x9e\xee\x12\x96\xc2\xd3\x5d\xc2\xd2\x5d\xc2\xd2\x5d\xc2\xd2\x5d\xc2\xd2\x5d\xc2\x52\xf2\x74\x97\xb0\x74\x97\xb0\xe4\x9e\xee\x12\x96\xc7\xa8\x9e\xdd\xdd\x2b\xdd\xdd\x2b\xf6\xf9\x5e\xef\x5e\x09\xd0\x18\x07\xa5\x7b\x4a\x83\x89\xd8\x3e\xf5\xba\x81\xd9\x0b\x4c\x7e\xaa\x11\x2a\x79\xaf\xce\xc1\x64\x46\xa1\x53\x40\x84\x60\x1e\xd1\x35\xb3\x89\x9c\x19\x55\xb9\x36\xe8\x5f\xf7\x67\x12\xaa\x9d\x88\xb4\xc5\xd2\xe1\x85\x66\xa7\x9f\x7f\x54\xa4\xcf\x91\xa7\x36\x0e\x08\x18\x9d\x9a\xe3\xde\x4a\x11\xaf\xe4\x32\xe3\xa1\x4b\x14\x4a\x12\xa7\xe1\xe2\xe8\xf2\x04\x38\x9e\xc6\x01\xe2\x80\x3f\x46\x1c\x0b\x5d\xcc\x06\xa2\x4f\xa7\
xc1\xe7\xe8\xd3\x29\xfb\xfc\xe9\xf9\xfe\xcf\x2f\x3f\xd7\x20\x6a\xe8\xff\xeb\xa1\x7a\x9d\xe2\x1a\x7d\x3a\xfb\x7c\xd3\xfb\xa0\x50\xfe\xb1\x1c\xe5\x33\x66\x8e\xdf\xe5\x0c\x51\xf8\xf1\xa5\x5b\x26\x0f\x51\x85\x7b\xb2\x54\xbe\x59\x2b\x04\x53\x72\x8f\x69\x6d\x29\xf1\x56\x6c\x68\xbd\x65\x59\x6a\xac\xa0\xc1\xc5\x34\xff\x02\xb5\x2f\xc4\x7c\x5b\xd8\x99\x8b\x47\x16\xce\x97\x5b\x8a\x8a\x3a\xef\xfd\x42\xed\x4f\xf7\xa7\x71\x23\xf0\xc1\x22\xa6\xd5\xf9\x43\x11\xe2\x98\x16\x35\xcc\x38\xf2\x91\xc4\x57\x24\x6f\xd6\x55\x4c\x93\x53\x17\x55\x93\x5e\x21\x60\xa2\x30\x85\xef\x13\xb8\x99\x77\x2a\x8d\xb5\x7a\xc3\x4c\xc7\x8e\x48\xbb\x69\xc3\x03\x12\x10\x20\x21\x2d\xea\xb9\x8d\xa8\xd5\x94\xdf\x2f\xd6\x8e\xaa\xd2\x97\xab\xf4\xe3\xc2\x60\x17\xeb\x45\xad\x38\x52\x2f\xe6\x6a\x8d\xb2\x35\xa4\xb4\xe4\x7a\xb0\x9a\x4b\x32\x0b\xad\x07\xfd\xff\x03\x00\x00\xff\xff\x2c\x6e\x3a\x4d\x8b\xee\x01\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x7b\x6f\x1b\xb7\xb2\xf8\xff\xfd\x14\x03\xe7\x02\x7e\xc0\x92\x92\xb4\xa7\xb8\xd7\x07\xfd\x01\x8e\xed\x34\x6a\x1c\xdb\xc7\x72\x5a\x9c\x63\x04\x36\xb5\x4b\x49\xac\x77\xc9\x3d\x24\xd7\x8e\x1a\xe4\xbb\xff\xc0\xd7\xbe\xb4\x2f\xc9\x52\xe2\xa4\xbb\x40\x1b\x59\x5a\x0e\x87\xe4\xcc\x70\x66\x38\x33\x7c\x06\x47\x2c\x9a\x73\x32\x9d\x49\x78\xf9\xfc\xe5\x4b\xf8\x95\xb1\x69\x80\xe1\xf4\xf4\xa8\x0f\x87\x41\x00\x97\xea\x27\x01\x97\x58\x60\x7e\x8f\xfd\xfe\x0f\xcf\xe0\x87\x67\x70\x4a\x3c\x4c\x05\xf6\x21\xa6\x3e\xe6\x20\x67\x18\x0e\x23\xe4\xcd\xb0\xfb\x65\x1f\x7e\xc7\x5c\x10\x46\xe1\x65\xff\x39\xec\xa8\x17\xb6\xec\x4f\x5b\xbb\xff\xfc\xe1\x19\xcc\x59\x0c\x21\x9a\x03\x65\x12\x62\x81\x41\xce\x88\x80\x09\x09\x30\xe0\x8f\x1e\x8e\x24\x10\x0a\x1e\x0b\xa3\x80\x20\xea\x61\x78\x20\x72\xa6\xbb\xb1\x40\x14\x1e\xff\xb6\x20\xd8\x58\x22\x42\x01\x81\xc7\xa2\x39\xb0\x49\xf6\x3d\x40\xd2\x60\xac\x9e\x99\x94\xd1\xc1\x60\xf0\xf0\xf0\xd0\x47\x1a\xdb\x3e\xe3\xd3\x41\x60\xde\x14\x83\xd3\xe1\xd1\xc9\xd9\xe8\xa4\xf7\xb2\xff\xdc\xb4\x79\x4f\x03\x2c\x04\x70\xfc\xdf\x98\x70\xec\xc3\x78
\x0e\x28\x8a\x02\xe2\xa1\x71\x80\x21\x40\x0f\xc0\x38\xa0\x29\xc7\xd8\x07\xc9\x14\xc6\x0f\x9c\x48\x42\xa7\xfb\x20\xd8\x44\x3e\x20\x8e\x7f\x78\x06\x3e\x11\x92\x93\x71\x2c\x73\xd3\xe5\xf0\x23\x22\xf7\x02\xa3\x80\x28\x6c\x1d\x8e\x60\x38\xda\x82\x57\x87\xa3\xe1\x68\xff\x87\x67\xf0\xc7\xf0\xea\xcd\xf9\xfb\x2b\xf8\xe3\xf0\xf2\xf2\xf0\xec\x6a\x78\x32\x82\xf3\x4b\x38\x3a\x3f\x3b\x1e\x5e\x0d\xcf\xcf\x46\x70\xfe\x1a\x0e\xcf\xfe\x0d\x6f\x87\x67\xc7\xfb\x80\x89\x9c\x61\x0e\xf8\x63\xc4\x15\xfe\x8c\x03\x51\x13\x69\x56\x6f\x84\x71\x0e\x81\x09\x33\x08\x89\x08\x7b\x64\x42\x3c\x08\x10\x9d\xc6\x68\x8a\x61\xca\xee\x31\xa7\x84\x4e\x21\xc2\x3c\x24\x42\x2d\xa7\x00\x44\xfd\x1f\x9e\x41\x40\x42\x22\x91\xd4\xdf\x2c\x0c\xaa\xff\x03\xa1\x13\x76\xf0\x03\x80\x24\x32\xc0\x07\x70\x8c\x24\x8a\x38\xf3\x06\x7f\x30\x7e\x37\x09\xd8\xc3\x15\x0e\xa3\x00\x49\xfc\x03\x80\x8f\x85\xc7\x49\xa4\x40\x1d\xc0\xd5\x0c\x27\x2f\x43\xf1\x65\xe0\x58\xb0\x98\x7b\xaa\xd5\xc7\x9e\xef\x05\x3d\x21\x79\xec\xc9\x1e\x45\x21\x3e\x58\x78\x3d\x79\x6b\x86\x44\x8f\xa0\xf0\x00\x26\x28\x10\xf8\x87\x08\xc9\x99\x50\xc8\x4d\xb1\x54\xff\x94\xa0\x30\x89\xa9\xa7\xfe\x52\x94\xa9\x97\x76\x8a\x15\x41\x4e\x18\x0f\xf5\x98\x01\x8d\x59\x2c\x01\x95\xf5\x09\x10\x21\x8e\x42\x2c\x31\x17\x06\x7c\x0f\x2a\x11\x54\x8f\x23\xaf\x03\x90\x3c\x76\x5f\xe6\x50\x3a\x84\x49\x1c\x04\x40\xa8\x90\x9a\x19\xd8\xa4\xbc\x6b\x45\x9c\xf3\xb6\x43\xd2\x2f\x7f\x03\x83\xf2\x71\x80\x25\x6e\x3b\x2a\xf3\xf6\x13\x1d\xc4\x61\x10\x2c\x3b\x8e\x20\x58\x6e\x24\x11\x67\x7f\x62\x4f\xd6\x0d\x40\x78\x33\x1c\xa2\x03\xfb\x17\x80\x9c\x47\xf8\x00\x94\x08\xa2\xd3\x1c\xac\x80\x79\x9a\x30\x1e\x01\x2c\x20\xa2\x35\x8f\xa9\x77\x4b\xe8\x31\x44\x74\xfe\x0d\xcf\x81\xda\xc3\x18\xc5\x54\x6a\x24\xcd\xab\x16\xdf\xe2\xa0\x1c\x04\x2b\x34\x2b\x88\xd2\xc8\x34\xe2\x27\xe3\x14\x83\x4f\x9f\xec\xc7\xcf\x9f\x07\x0e\x61\xf5\xad\xfb\xfc\xf9\xf3\xe0\xa1\x00\x4c\xfd\xac\x46\xf8\xf9\x73\x0e\x6c\x84\x38\xa6\xb2\xe7\x31\xaa\xb6\x54\xcc\x8b\x93\x69\xde\x0a\xd0\x18\x07\xe2\x00\xcc\xbf\xb9\x9f\x94\xac\xf5\x38\x56\xa3\xc9\x4e
\x51\xa9\x20\xce\xfe\xc4\x31\xf2\x7b\x92\x84\x98\xc5\xf2\x00\x9e\xe7\x7e\xd3\x92\xaa\xea\x47\xc3\x28\x8b\xbf\x9a\x45\x60\xe3\x32\x4a\xb0\x7f\x9b\x35\x4e\xfe\x88\x02\xe4\xe1\x10\x53\x99\x7c\xf3\x27\x1b\x8b\xf4\xe7\xdc\x44\xf4\x8a\x94\x11\x71\x16\x61\x2e\x09\x16\x29\x21\x98\x89\xb8\x22\x21\x4e\xbf\x2b\xa1\x35\xf3\x18\xba\x3f\x00\x1f\xd9\xe1\x64\x7e\x33\x43\x9d\x32\xbb\xd3\x1d\x25\x70\x33\xef\xa8\x19\x3c\xa7\xc1\x3c\x37\xef\x0b\x9c\x77\x1e\xcb\x28\x96\xc0\x68\x30\xef\x6b\x36\x54\x3d\x81\x74\x3b\xec\x03\x12\x16\x6b\xbf\x9f\xeb\xff\x2e\x1e\x63\x4e\xb1\xc4\xa2\x47\xc2\x30\x96\x4a\x03\x2a\xf4\xe4\xa3\xe9\x95\x5d\x85\xe6\xd1\x16\x46\x74\x9c\xb4\xad\x44\x5c\xff\x8b\x82\x3e\xd8\x17\xc1\x8f\xb9\x91\x13\x4e\x83\x39\x3e\xfc\x55\x09\x60\xb5\x6a\xfb\x4e\xf9\xc1\x3e\x10\x9a\x81\x09\x20\xb0\xc7\xa8\x2f\x60\x47\x60\x0c\xd7\xbf\x8d\xce\xcf\x80\x63\xf5\x2e\xa6\x46\xa1\x51\x30\x1c\xec\x0f\x3b\x4a\x67\x14\x07\x83\x81\x8f\xef\x71\xa0\x96\x58\xf4\xa7\x5a\x45\xee\x7b\x2c\x1c\x44\x9c\x49\xe6\xb1\xa0\x37\x8e\x27\x13\xcc\xc5\xc0\x67\x9e\x30\xdf\xfe\xf8\xec\x4f\xc1\xe8\xee\x6e\x3f\xd7\xbb\x9b\xf2\xdc\x00\xc2\x58\x48\x18\x63\x98\x70\x16\xc2\x8b\xe7\x10\x12\x1a\x4b\x2c\x60\x67\xeb\xe7\xe7\xcf\xc5\xd6\xae\x92\x8e\x2f\x7f\x82\x19\x8b\xb9\xfa\xf2\x7f\x7f\xfe\x49\x7f\x5d\x0e\x99\xc3\x18\x4f\x09\x15\xf0\x30\xc3\x54\xcf\xcb\x84\x70\x21\xd5\xb4\x28\x65\x53\xc4\xe3\x90\x48\xb5\xbe\x30\x34\x9a\xb2\x13\x0d\xea\x57\x1e\x53\x9a\x5f\x28\x00\x24\xf5\x6b\x98\xfa\x4e\xb7\x76\x03\x88\x30\x27\xcc\xdf\x07\x25\xa1\x39\x0e\x11\xd1\xca\xa2\x9a\x7f\x40\x1c\x83\xa7\xb6\xc4\x20\xc0\xfe\xbe\x6a\x95\x83\x99\xed\x13\x53\x1f\x6b\x20\x3e\x90\x02\x46\x8a\x1c\x2d\x4a\x5a\x33\x86\xeb\x10\x51\x34\xc5\x3e\x78\x41\x2c\x24\xe6\x1f\x76\x06\xbe\xd3\x2d\xf5\xd4\x7b\x8c\x2a\xeb\x41\x24\x02\x4f\x0c\x62\x41\xe8\xb4\x97\xfc\xfd\xcc\x63\x74\x42\xa6\xb1\x22\xc8\x1b\xc6\x6f\x04\x0e\xb0\xa7\x94\xf6\x1b\x74\x63\xa1\xee\xee\xe7\x90\x55\x28\xd9\x5f\xb4\xba\xae\x25\xce\xf2\xfc\xa1\xe6\x65\x91\x33\x10\xe7\x68\x5e\xcd\x18\xbf\xa5\x22\x68\x81\x25\x2e
\xad\x3c\x33\x8c\x7c\x4c\x38\xf6\x94\x09\x71\xe8\xcd\xbd\x80\x78\xf0\x2b\x47\xd1\x4c\x2d\x99\x82\xa1\x48\xc8\x2c\xfd\xd2\x78\x27\xca\x36\xa6\x7e\x0f\x87\x91\x5c\x14\x31\x76\x5b\x20\x42\xf6\xcc\xa8\xd4\xc7\xcc\xef\x44\xe2\x30\x37\xf6\x52\xf9\x5c\x98\x00\xf3\x46\x71\x17\x2c\x4c\xc8\xa2\x54\x37\x4f\x0f\x84\xc4\xd1\xd0\xcf\x7d\x59\x26\xa3\xcd\x33\x43\x3e\x63\xd1\x6f\x6c\x5c\xfc\xa1\x06\xd1\x92\xd5\x7a\xe3\xc0\x54\xbf\x58\x3d\xaa\xea\xc6\x15\x82\xf0\x37\xc3\xd1\xc8\x76\xab\x08\xac\x5f\xd2\x71\xab\x25\x6e\x9a\x20\xf5\x20\xee\xcd\xc8\x3d\x7e\xcf\x49\xe9\xcf\x55\x14\x5d\x39\x5b\x87\x29\xbc\xd2\xd7\x73\xc3\xde\x4e\xc7\xfd\xe6\xe8\xf5\x08\xde\x5f\x0e\x85\x56\xb7\x0d\x10\x4d\xe0\x63\x0c\xf8\xa3\xe4\x48\xb1\x41\x29\x44\x50\x36\xba\x93\x30\x4a\xa8\xf8\x9a\x69\x18\xd7\x6e\x03\x3b\x8d\x3e\x27\xf7\x98\x6b\x43\x17\x24\x12\x77\xa2\x0f\xa3\x38\x8a\x18\xaf\x86\xaa\xfd\x16\x6a\xf4\xe2\x00\xfa\x7f\x22\xbe\x0f\x7d\xe9\xfe\xdf\x9f\xfe\xa5\x3e\xa8\xff\x33\x0e\xfd\xbf\x48\xd4\xdf\xae\x98\x9d\x25\x56\x0a\xda\xb0\x65\xf1\xd5\x6a\x06\xcd\x4c\xd0\x22\xab\xa6\x4f\xc5\x66\xbe\xd8\x53\x42\xe8\x95\xef\x22\x3e\x5d\x17\x15\x4d\x5b\x90\x4f\x46\x7d\x98\x61\xd5\x79\xac\x34\x3d\x4d\x35\x11\x12\xfa\x5f\x45\x17\x66\xed\xfb\x70\xcc\x2a\x86\x47\x99\xb2\x51\xbc\x20\xf6\x33\x50\xf6\x41\xc4\xde\x0c\x90\x80\xdb\x5e\x40\xc6\x7f\x22\x2e\x6e\xd5\x62\xdf\xf6\x8e\x27\x8c\xfd\x32\x46\xfc\x56\x6d\x81\xa8\x7c\xc6\x41\x6d\x94\x8a\x76\x05\x96\x0a\x86\xda\xa8\x53\x46\xdc\x07\x41\x94\x65\x89\xc0\x63\x41\x40\xb4\x4b\x4d\x3b\xbd\x3c\x2f\xe6\xf5\x40\x63\x81\x15\x15\x2b\x7c\x19\x57\x74\xae\x41\xeb\x5d\x40\xfb\x72\x16\x85\x05\xfc\xed\xc8\x50\x71\xee\xda\x04\xda\x6b\x0b\x6c\x19\x72\xd4\xc2\x6c\xc7\x8a\x9e\x23\x16\x46\x48\x92\x71\x80\x35\x2c\x31\x17\x12\x87\xbb\x5a\xd6\x55\x8c\x93\x4d\xf4\x10\x9c\xf8\xf3\x58\x44\x8c\x35\xdd\x52\xca\x55\x80\x55\xb2\x2f\xeb\x96\xb4\x72\xf0\xbd\xc0\x93\x38\xd0\x0a\x37\x45\xe4\x1e\x07\x73\x6d\x84\x07\x01\x0e\xec\x2b\x1d\x49\xc1\x9f\x88\xbf\x5e\x27\x55\xfd\x96\xc2\x5b\x86\xb0\x7e\x43\xdc\xec\x4b\x7a\xa7
\x94\x0c\x90\x9f\x10\xc6\xd1\xe9\xe1\x68\x74\x71\x78\xf5\xa6\x86\xac\xd4\x7b\x39\x5a\xc9\x6c\x88\xdd\x2a\x43\xc0\xa6\x53\x42\xa7\x47\xda\x98\xa8\x5b\xe7\x0a\xcd\x11\x16\x17\xfa\x34\x0b\xb2\xbe\x41\x0b\x2d\xb2\x19\x5a\xcd\x06\xc9\x63\xaa\x1d\x03\x01\x9b\x82\xb1\x97\x34\xd3\xab\xfd\x03\x7f\xc4\x5e\x2c\xd7\xb7\x7d\xd4\xeb\x9c\x1a\x4f\x4d\x7e\xa7\x6c\x7a\xaa\x6c\xf0\xca\xd7\x5a\x4c\xb8\x7a\x90\xef\x13\x33\xd4\x8b\xc6\x9e\xa1\x1d\xcd\x2c\xba\x33\xf2\x18\x57\x36\xcb\x6b\xb8\x6a\xe2\x23\xcc\x7b\x11\xf2\xee\xd0\xd4\x4c\x7e\xa0\x01\x24\x2e\x0e\xa7\x9f\x5c\xcd\x2a\x84\x81\x79\x94\x82\xe0\xb4\x94\x2d\xce\x98\xdc\x02\x07\x54\x21\xa8\xc4\x80\xb3\x82\x31\xa8\xdf\x15\xb1\x60\x5e\xbe\xa2\xe6\x39\xf9\x88\xc2\x28\x50\x0a\xee\xf6\xb6\xc7\x42\xeb\x04\x81\x5f\xe0\xf5\xe1\xd5\xe1\xe9\xf6\xf6\x3e\x6c\x6f\x2b\x48\xf0\x0b\x0c\xcf\x5e\x9f\x9b\x2f\x18\x9f\xda\x93\xb6\x1a\xc8\xbf\xc0\xf1\xc9\xab\xf7\xbf\x6e\x6f\x97\xab\xc5\xb0\x3c\x51\x85\x88\xd0\xa3\x00\x89\x5a\xf9\x5b\xb3\xa2\x85\xd5\x7c\xe7\xc0\x35\x73\x91\x5a\x42\x3d\xc3\x56\x80\x9a\xf5\xda\x16\x1a\x25\xf0\x14\x10\xc3\x60\x7f\x5a\xe9\x5c\xa5\xbc\x19\x07\xac\xda\xbc\x8d\xf7\x41\xe9\xa8\xce\x4d\x64\x6d\x18\x1f\x4f\x50\x1c\xc8\x54\x98\x2b\x95\xd3\x9e\xe2\xd5\xd9\x3f\xb7\x7f\x22\x7e\xa3\xfa\xbe\x89\x39\x11\xb7\x6b\x61\x63\x35\xbc\x74\x9f\x5a\xdf\xb4\xa7\x30\x5b\x58\x88\x6a\x5e\x9d\x6d\xe8\x16\xc0\xcd\x73\x71\x46\x15\xc2\x55\x73\x6f\x56\x29\x4b\xf0\x53\x71\x30\x18\x4c\x18\xeb\x8d\x63\xef\x0e\xcb\x01\xa2\x28\x98\x4b\xe2\x89\xde\x98\x50\xc4\x09\x16\x03\x6b\x78\xf6\x62\xad\x22\xf5\x42\x2c\xb9\xfa\x3d\xe4\xca\x1e\xac\x24\xed\xed\xed\x99\x3f\x11\x07\x03\x19\x46\x03\x89\x85\xec\x09\xd3\xe9\xc0\x8b\x85\x64\x61\xef\x81\x71\xdf\x63\x31\x95\x06\x0a\x6c\x6f\xab\xc1\x1c\x0c\x06\x83\x19\x0b\xf1\x20\x16\x7c\x10\x90\xf1\xc0\xb8\x2f\x7a\x21\x8a\x38\xf6\x63\x0f\x2f\x7c\xd1\xc3\x76\x34\x06\xce\x5a\x0c\xd0\x26\xc1\xdd\x28\x8d\x97\x91\xc4\xcb\x12\x4f\x0a\x71\x99\xad\xef\x10\x42\x14\x45\xda\xdb\x38\x71\xe3\x9b\x6b\x86\xd6\x1a\xd4\x3d\x0a\x62\x2c\xf6\x2b\x50\x74\xe7
\x58\xa9\x64\x35\x7b\x71\x3f\x83\x8c\x36\xd8\xf4\x1b\x01\xf1\xa4\x89\x6a\x30\x50\x2b\x80\x2a\x9b\x70\x3c\x37\xee\x6d\x77\x2e\x7e\x78\x31\xd4\x12\x7e\x8c\x81\xdd\x63\xfe\xc0\x89\x94\x98\xf6\xe1\xc8\xd8\x7a\x5a\xea\x47\xf5\xe3\x37\x80\x09\x85\x01\x96\x9e\xa5\x96\x81\x42\x6b\xb0\xd7\x13\x44\x62\xad\xeb\x69\x46\xc0\x42\xbd\x16\x0b\xcc\xc1\x63\x3e\x7e\xb4\xb4\x98\x91\x7b\xfc\x78\x3f\x9b\x01\xb2\x92\x97\xad\xa2\x69\xa3\x8f\x8d\xdc\xe3\x8d\x7b\xd8\x94\x90\x22\x34\xc6\xe7\xf4\x35\x22\x41\xcc\x71\x1d\x67\x8d\x19\x0b\x30\x2a\x97\x62\xc5\x03\xa3\x22\xdc\x65\x98\xe2\x8f\x19\xd6\xe1\x24\x86\xb4\x35\x1c\xa7\x04\xd2\x29\xfc\x37\xc6\x4a\xfe\x01\x99\x54\x99\x92\xfa\x95\x39\x4c\x10\x09\xec\xe6\xe7\x76\x2e\x4d\xf9\x6a\x7a\x6f\xf5\x81\xe0\x6d\x1f\x46\x58\x6a\xa8\x92\xc1\xad\x9a\xbf\xdb\x7a\x8f\x89\x11\xb6\xe6\xa4\x23\x45\x89\x50\x1f\x47\x98\xfa\x98\xca\xd4\x3c\xb5\x78\xae\x65\xb7\x7b\x22\x16\x5e\xce\x0f\xea\x36\xba\x4a\x53\xaf\xc1\xd2\x53\xe4\xad\x43\xbc\x8c\x9d\x67\x2d\xbf\x77\x28\xba\xd4\x1b\x08\xec\xbc\xbb\xdc\x75\x6e\x00\x25\x68\xec\x7e\x5a\x01\x55\x83\x1b\x61\x7e\x8c\x8d\x1f\xf5\xfd\xf1\xeb\xce\x6a\xfc\xfb\xed\x99\x6a\xe9\x97\xde\x34\xc9\x3d\xfe\x16\xb7\xcc\xfe\xc7\x30\xd8\xb7\x3f\x92\x7b\x6c\x7e\x52\x9f\xd2\x5f\x6b\x9c\x6d\x1b\xd9\x6b\xd5\xa3\x85\xef\x5a\xf5\xf2\x7f\x65\x20\xb6\xb3\x88\x8a\x4a\xb9\xf9\x39\x59\x58\x25\x47\x84\x11\x19\xeb\x94\xd2\x7a\xe4\xa7\x49\x00\x52\xf9\xb0\xdb\x7b\x67\xfe\xe5\xc0\xad\xec\x99\x31\x9a\x47\x3d\x9c\x42\x74\x99\x8e\x89\x62\x93\xb5\xce\x4b\xd5\x91\xa9\x7b\x7a\xae\xb7\x15\xdd\x35\xb6\x75\x93\x9b\xa6\x7a\x77\x84\xf2\xc9\xaf\x66\xcc\xa2\x21\x98\x3f\x18\x77\x1a\x8a\x64\x56\x47\xc0\x7d\x1d\x3a\xec\x33\xa0\xac\xda\x4f\x04\x40\x6d\x5c\x2f\xa6\x7e\xa2\xc5\xd8\xb0\x12\xc2\xa8\x91\x44\x08\x04\x0e\x89\xc7\x02\x46\xfb\xf0\x2e\x0e\x24\x89\x2a\x8d\xfa\xcc\xe4\x24\x47\x3e\xce\x5c\x57\x8c\xcf\x28\xb6\x6c\xa8\xe4\x97\xc0\x4a\x77\xd1\x0a\x0d\x46\xde\xac\x06\xe6\x22\x22\x6f\x30\xd7\x7a\x15\x52\x6a\x91\x36\xfc\x4c\x64\x62\x4e\x1e\x0a\x4a\xa2\x08\xd7\x4d\x80\x66\x50\x73
\x8a\x94\x92\xbf\x8e\x24\xd0\x68\xcf\xad\x5a\xac\xd4\x79\xd8\xb2\x8a\xfd\x16\x7c\xaa\x81\xb8\x95\x30\xe5\xd6\x01\x7c\x32\x7f\x12\x2c\xb6\x0e\xe0\xda\xfe\xf6\x62\x6b\xdf\x7e\x7a\x99\x7c\xfa\xf1\x9f\xfa\x9f\x9f\xb6\xaa\x24\xa8\x7a\x3e\xc0\x67\xf8\xbc\x36\x17\x92\x6b\xd4\x5a\xd7\x80\xa5\xf4\x0d\x68\xd2\x39\xa0\xad\xe7\xb1\xbd\xee\x61\x38\xe4\x77\xc4\x89\x1a\xf9\x53\x54\x40\x46\x79\x0c\x5b\xb8\x7c\x52\x35\xe4\x5d\xaa\x84\x18\x4e\xbd\xb7\x60\x8a\xe6\x7b\x05\xae\x3b\x4a\x66\xdc\xa3\x40\x19\x0d\x56\x75\xd6\x3b\x92\xc7\xc2\x10\x51\xff\x00\x6e\x47\x27\x57\x1a\xd6\x2f\x5b\x1a\xd0\xd6\x3f\x6f\x77\x1f\x7f\x92\x6f\x43\x29\x97\xb3\x86\xdb\xae\x41\xed\xfc\x17\x0f\x1f\xb2\xb1\x9c\xcd\xd3\xad\x04\xab\xc1\x5d\xdb\x1b\x42\x30\x8f\xe8\x00\x42\x93\xa7\x41\xf4\x31\x76\xdf\x40\x2d\xc1\xec\x0e\xcf\x53\x7f\xe6\x18\xcb\x07\x8c\x29\xbc\xd0\x9a\xd0\xcf\x3f\x82\x37\x43\x3a\x88\x83\x0b\x08\x18\x9d\x9a\xd8\x30\xfd\xb6\x52\xaa\x18\x0f\xcb\xc6\x6a\xd6\x6c\xc2\x82\x80\x3d\x28\x42\xe0\x78\x1a\x07\x88\x67\x24\xf6\x01\x44\x9f\x4e\x83\xcf\xd1\xa7\x53\xf6\xf9\xd3\xf3\xfd\x9f\x5f\x7e\x36\xf8\x55\x13\xc6\xea\x18\x5a\x7c\x4a\x60\xd6\x63\x78\x9d\xa2\x18\x7d\x3a\xfb\x7c\xd3\xfb\xa0\x30\xfd\xf1\x33\x9c\x31\x08\x19\xc7\x4a\x26\x97\x19\x5d\x3f\xbe\x74\xcb\x61\x77\x96\x64\x49\x7c\xb7\x3f\x4c\xc9\x3d\xa6\x7a\x59\x16\xa9\x76\x09\x8a\x8d\xc8\xf4\xd1\xfe\x9b\x0b\x0d\x63\x15\xf7\x4d\x45\xcb\x26\xef\xcd\x05\x99\x76\xce\x9b\xce\x79\x03\xdf\xa7\xf3\x46\x51\xf7\x51\x40\xd4\x94\xad\xc1\x77\xa3\xa0\x75\xfe\x1a\xf3\x3c\xd5\x53\x7e\x23\x06\xbb\x23\xfe\xf2\xa7\x3b\xe2\xff\xee\x8f\xf8\xff\x66\x8e\xd4\x15\x0e\x1f\x2f\xc8\xf4\x1b\x77\xa3\x46\x64\x6a\x7e\x89\xc8\xb4\x9f\x89\x88\xed\xdc\xa8\x35\x6e\x54\xab\x0f\xd4\xfb\xe6\xbe\x4b\xff\xaa\xd9\x13\x3b\xf7\x6a\xe7\x5e\xed\xdc\xab\x9d\x7b\xb5\x73\xaf\x3e\x75\xf7\xaa\xb2\x33\x53\xef\xaa\xf6\xac\x5e\x6b\x18\x1f\xd6\xe1\x58\x8d\x38\xd6\x72\x58\xe9\x14\x23\x9d\x0a\x58\xe9\x65\x2d\x17\x95\x0b\x3a\xdd\x02\xbc\xb6\x5e\x28\x25\x25\x99\xfd\x2b\xd9\x72\xb2\xf8\x99\xbc\x1c\x89\xa3\x1b\xe2\x97\x6e
\x42\xc3\x89\x4e\x3c\x4a\xc4\xd9\xbe\x89\x6d\x64\x63\x78\x20\x41\x00\x42\x22\x2e\x5d\x9e\xae\xce\xfe\xa5\x76\xad\x5c\xd6\xeb\x23\x1d\x5f\x2d\x19\xb5\x1d\x83\x56\x32\x66\x03\x3d\xb7\x61\x44\xb5\x7b\x48\xf6\x78\xff\xa4\x03\xb3\x92\x8b\xb2\xb2\x71\xa3\x97\x52\xb7\xdc\xbc\xa3\x52\xbb\x8a\xae\xd0\x9a\x12\xf0\x8e\x12\x70\xcb\x58\x3b\x76\xac\x06\x17\x90\x68\x6a\x7c\x5f\x52\x22\x6f\x66\x24\x04\x11\x46\xd4\x74\x6e\xa8\xce\xb5\xfc\x6d\xb8\x96\x9f\xac\xb7\xd0\x49\xa4\xce\x61\x58\xfe\x74\x0e\xc3\xef\xde\x61\xc8\x74\xe1\x9b\xd7\xa6\xc8\xce\x5a\x1c\x38\xe7\x19\x88\xcb\xf2\x92\x29\xf6\xa3\x2b\x06\xce\x88\x37\xb3\xa2\xd0\xe0\x68\x74\xba\x71\xd5\xf4\xf8\x44\x44\x01\x9a\x2b\xf3\xdb\x95\xf2\xb3\x7b\xa9\xcf\x3c\x9d\x8f\x9e\x56\xc5\x11\xae\x6e\x81\x05\x5d\x01\xd2\x60\x53\x4e\x3c\x9d\x5f\xb6\x95\x5f\xb6\x8a\x4d\xde\x5b\xbf\xac\xc0\xd2\x2d\xd3\xb5\xb0\x1e\x8e\x74\xae\xd2\x32\x43\x46\x81\xf5\xc7\x7d\xc2\x6c\x41\x9b\x98\x73\x4c\xe5\x40\xfc\x37\x18\x08\x2c\x7b\xb6\x71\x7f\x26\xc3\x60\xb7\xa2\xd3\x93\x9c\xad\xa5\xeb\xdf\x68\x3a\xe9\xb9\xd6\x30\x09\xd0\xd4\xe5\x91\x59\xb4\x8e\x4e\x87\x7f\x2b\x47\xe9\xe8\x5f\xa7\xdf\xb7\x3b\xd4\x6d\xfa\x9d\x47\xb4\xf3\x88\x76\x1e\xd1\xce\x23\xfa\x45\x3d\xa2\xd1\x5c\x44\x88\xdf\x3d\xde\x13\x93\xc0\x59\xc9\x15\x53\xdd\xba\xd1\x17\x33\x1f\xa9\xa6\x6b\x70\xc6\x54\x0b\xbf\x9e\x4e\x43\xbe\x98\xcb\x19\xa3\x55\x1b\xdc\xdf\xa1\x24\x57\x5d\xb9\x1a\x25\x8a\xac\x18\x65\x3c\x53\x89\xab\x2e\x7b\xbe\xb9\x12\x17\xa2\x7e\x57\x8a\xcb\x3d\xdf\x6b\x29\xae\x9e\xb2\x67\x6d\xe5\xad\x7c\x8d\xad\x0a\x88\xcb\x56\xde\x2a\xab\xb1\x55\x03\xba\xab\xbc\x95\x7d\x9e\x46\xe5\x2d\x27\xb3\xb2\x45\xb4\x74\x21\x60\xdf\x9a\x47\x15\xa3\x6c\x23\xab\x6a\xaa\x65\x55\x4d\x6f\x57\x5d\xc9\x3c\xdf\x64\x90\x6e\x43\x31\x2d\xb3\xcf\x77\xc5\xb4\xbe\x25\xc7\x79\xa2\x3f\x76\x9e\xf3\xf2\xa7\xf3\x9c\x7f\xf7\x9e\xf3\x05\x1b\x65\x7d\xe5\x9d\xea\x4d\x1f\xa8\x2f\x78\x5d\xf4\xf2\xe9\x12\x5b\x56\xc8\x9a\x82\xbc\x55\x9a\x63\x2c\xb0\xd2\x10\xb3\x24\xf0\xce\x66\x7a\x21\xe8\x47\x73\xdd\x7c\x2d\x0c\xf9\x77\x75\x87\x2f\x11\xa6
\x6c\xec\xec\xa7\x1f\xaa\xac\x77\x02\x13\x8f\xac\x3f\xf6\xec\x69\xb5\xe8\xab\xef\xca\xa2\x8f\x2b\x80\xae\x2d\x26\x39\xca\x72\xcf\x7a\xf4\xa4\x1c\x43\xae\xe0\x02\x48\x0b\x8e\xb2\x49\x96\x17\x13\xeb\xae\x8a\x74\x6d\x48\x98\x75\xba\x4c\x38\x0a\xb1\x52\xb1\x8b\x06\x7f\x62\xda\x47\xf3\x7d\xe8\xe3\xa9\xc9\xbc\xac\x00\xda\x19\xf8\xf6\x72\x1a\x3f\x0e\x08\x2d\xd1\xab\x96\x71\xc4\x8d\x12\x38\xab\x38\xe2\x6a\x5a\xd7\x38\xe2\x52\xdc\x13\x81\x81\xca\x75\xa5\x35\x46\x47\x85\xe8\xa3\x0d\x86\x11\x17\x98\xbf\x61\x31\xaf\xe3\x2c\x42\x25\x9e\x56\xb0\xba\xbb\xd1\x85\x50\xf9\xf3\x4f\xed\xb6\xc4\x62\xd7\xcb\x08\xe6\x77\xe8\x23\x09\xe3\x10\x68\x1c\x8e\x31\xd7\xbb\x22\x51\x42\x39\xc2\x5c\x5f\x1d\x02\xc8\xee\x76\x15\xd4\x64\xa5\x25\xc7\x3a\x9c\x10\xfb\x6a\x8b\x44\xea\xcf\x38\xd0\x47\x42\xd6\x70\xc1\x1f\xf5\x15\x77\x46\x2c\x53\x46\x7b\x7f\x61\x5e\xb5\xd1\x2a\x51\x07\x63\x3c\x61\x1c\xbb\xfb\x47\x38\x76\xec\x8c\x48\xa0\xb6\xf2\x43\xfd\x4b\xd2\xb9\xfd\xb5\xd2\x47\x23\x67\x1c\x89\x99\x0e\xe7\xc9\xa1\x24\xf2\x08\x99\xae\x7f\xb2\x53\xa0\x7e\xaa\xcc\x77\x4c\x2e\x5c\x81\x07\x42\x7d\xf6\x90\xce\x64\x12\x87\xf4\xe2\xf9\x9a\xaa\x64\x26\xeb\x7b\xc5\x24\x0a\xbe\x06\x61\xe9\x8e\xd7\x41\x56\x84\x82\x54\xb0\x9e\x28\x59\x2d\xac\xe1\xcb\x9f\x1e\xbf\x88\xeb\x39\xd3\x18\x3d\xe2\x44\xa3\xb2\x6d\xd3\x79\xc6\xba\x4e\x33\xba\xf3\x88\xee\x3c\xa2\x6a\x8c\xdd\x79\x44\x77\x1e\xf1\xcd\x52\x5d\x77\x1e\x51\x32\xea\xce\x41\x6d\x9e\xef\xf1\x3c\xc2\x68\x04\xdd\x71\xc4\xe2\x7b\x4f\xf5\x38\x62\xd4\x1d\x46\x74\x87\x11\xdd\x61\xc4\xb7\x7d\xb5\xc7\x62\xb9\x86\xd6\xb7\x7b\x54\x79\xa9\xb3\x31\x9d\xdf\xeb\xed\x1e\xb5\x97\x7b\x2c\xce\x69\xe3\xfd\x1e\x6b\x71\xc0\x77\xa7\x3b\xb9\x67\xf1\x74\xa7\x3b\xdb\x49\x57\x67\x39\x2f\xd3\xe5\x7a\xdc\x4c\x97\x8f\xf1\x33\x95\x36\x6e\xe5\x68\xba\xfc\x02\x71\xb3\x97\x5d\xc8\x6c\xe7\xa2\x2a\x4e\x4c\xe7\xa2\xea\x5c\x54\xdf\x2d\xd5\x75\x2e\xaa\x92\x51\x77\x4e\x0b\xf3\x3c\x69\xa7\xc5\x65\xe7\xb5\xe8\xbc\x16\x7f\x6f\xaf\xc5\xe5\xda\xcd\xe7\x4a\x05\x18\x56\x08\x9c\xbc\x74\x31\x93\xca\xbc\xa9\x18\x77\x75\xcc
\xe4\x65\x17\x32\x09\x5f\xd6\xa8\xbe\xec\xac\xea\x15\xac\xea\xd1\x7f\x83\xf5\xd8\xd5\x06\xd0\xca\x96\x75\x45\xf3\x56\xb6\xf5\xe8\xbf\xc1\xc6\xe3\x38\x9e\xfc\x79\x97\x12\x3b\xbe\x8f\xfd\xea\x7b\x36\xd4\x63\x8e\xb8\x12\x17\x6a\xa7\x26\x3e\x71\x35\xd1\xb0\x45\xa7\x28\x96\x3f\x9d\xa2\xf8\xdd\x2b\x8a\x9d\xf2\x93\x7f\x2a\x94\x1f\x18\xfd\xeb\x74\x5b\x98\xcf\x4a\x4c\x54\xeb\x42\x55\xbe\x88\x54\x43\x5a\x46\x17\x5a\x87\xdc\xe8\x0a\x24\x3d\x91\x02\x49\x99\x1d\xa7\x2b\x91\xd4\x95\x48\xea\x4a\x24\x75\x25\x92\xba\xa2\xf1\x25\x79\x5f\x4f\xaa\x68\x7c\xb2\xfd\xa7\xa5\xe3\x47\x27\x57\xb6\x7c\xfc\x3a\x2f\xe6\x14\xba\xc6\x7b\x95\xa7\xa0\xdd\x9d\x9a\xa6\x4e\x7c\xbd\x95\x9f\x97\x8b\xaa\x57\x20\xf6\x0f\xe2\x27\x41\x41\x31\x25\xff\x8d\x31\xa0\x90\xd1\x29\xa0\xa0\xec\x30\xe4\x4f\x36\x76\x89\x4e\x7a\xa6\xa4\xdd\xea\x72\x80\x95\x00\xd2\x3a\x15\x12\x10\x71\x3c\x21\x1f\x9d\x75\x54\x02\x91\xf8\xfb\xea\x3d\x65\x3a\xdd\x2a\xa5\xb9\xe7\x5b\x51\xd5\x73\x65\xe5\x7b\x0a\x6c\x8f\xf8\xb7\xe6\xf2\x49\x73\x48\x4c\x68\x59\xf1\xfd\x12\xf8\x13\x82\x03\x1f\x26\x9c\x85\xc0\x74\x25\x69\x05\x4d\xe4\xc7\x6e\xb5\x11\x60\x34\x98\x43\x80\xa5\xbe\x72\x73\x07\xf5\xfe\xda\x87\xc3\xde\x7f\x76\xcb\x84\x8f\x49\x54\x12\xb0\xf3\xbc\xf7\x7f\xbb\xfb\x10\x53\x1f\x73\xe1\x31\x8e\x05\xec\xdc\xec\x1a\x1c\x67\xf3\x68\x86\xa9\x80\x9d\xde\xae\x76\x70\x51\x26\x4d\xe1\xfc\xf2\x28\x2b\xb5\xdb\x68\x89\x9e\x02\x03\xc6\x2d\x14\xeb\x08\xf5\x18\x15\x56\x33\x70\xd7\x85\xfe\xa8\xfb\xfa\xc7\xf3\x12\x88\xe9\x05\xa2\x2b\xfa\x71\x16\x2f\x8e\xad\x10\x13\xcd\xc2\xa1\x82\xa4\x8b\x56\x7e\xf1\x82\xd8\x1a\x23\xbc\xfe\x66\xd8\x1c\x65\x16\x9c\xcd\xb6\xa5\x2d\x81\xab\x35\x1b\x34\xd5\xb7\x98\x2a\x60\x41\x60\xc8\xdc\xb8\x0e\x63\xa1\xa9\xc1\xe3\x58\xbf\x60\x75\x78\x47\x9b\x39\xb0\x84\x0a\x89\xa8\x87\xed\x8d\xb4\xb0\xb7\x77\x87\xe7\x62\x6f\x2f\x4f\x64\x2f\x54\x27\xb9\xdb\x5d\x1b\xae\x9e\x95\x0c\xae\x2f\x5f\x1f\xc1\x8b\xe7\x3f\xfe\x23\xad\xe1\xfa\xf0\xf0\xd0\x27\x58\x4e\xfa\x8c\x4f\x07\x7c\xe2\xa9\xff\xd4\x1b\x7d\xf9\x51\xee\xa6\x18\x18\xd1\xb7\xb7\x97\x83\x68
\x8d\x0e\xbd\xb3\xed\xc3\x38\x96\xfb\x40\xf4\x65\x11\x02\x53\xb9\xdf\x1e\xdd\x1c\xd0\xcc\x9d\xb4\xcb\xa3\x5b\x79\xf1\x6c\x8b\x2b\x67\x93\x95\xfe\x61\x59\xea\x66\x9e\xce\xf4\x5d\xa4\xef\x46\x32\xb5\x2d\xab\x08\x55\x93\xa7\x7d\x27\x71\x4e\x70\x2c\x58\xcc\x3d\xbc\x2c\x9a\xba\xcb\xa5\x51\x3c\x43\x21\xae\x42\x6f\xdb\x54\x95\xd6\xf2\xce\x3a\xb4\x2c\x6e\xb9\xe0\x53\x47\xe4\xc9\x0c\xef\x17\x43\x34\x0c\xd4\xb1\xd1\x62\xdd\x52\x7b\x01\x8b\x7d\xeb\x00\xe9\x7b\x2c\x1c\xa0\x88\x88\x81\x8f\x05\x99\xd2\x81\xeb\xe8\x46\x6f\xce\x7d\xd8\x83\xd7\x05\x61\x78\x1b\x71\xa6\xa4\x8b\xe8\x73\x3c\x25\x8c\x8a\xfe\x43\xc1\xae\x12\xb7\xfb\xb9\x09\xcd\x21\xed\x70\xcd\xc1\x9c\xd9\x43\xa5\xf4\x5a\x64\x97\xdd\x9a\xf4\x36\xf8\x64\x3f\xdd\x10\xff\xf3\xc0\x76\x3d\xf8\x64\x3e\x7c\x1e\x2c\xe0\x30\xf8\xe4\x7a\x52\x0d\xf2\x57\x05\xe8\x41\x65\x06\xe2\x48\x61\xb9\xa1\xe4\x85\x80\x6d\xb0\xfc\x50\x92\xce\x07\x9f\xdc\xc7\xe6\xe1\x6c\x2f\x4b\xa4\xca\x64\x09\xb1\x12\x0f\x8b\xa4\x5a\x34\xf8\x8a\x1e\x9f\xa4\x69\x0b\xb9\xef\xa6\x21\xed\x0f\x1e\x66\x4c\x60\xe7\x67\x41\x1c\x83\x88\xc7\x42\x12\x19\x17\x43\xdb\x92\x80\xb6\x74\x6f\xf8\xdd\xb4\x52\x2c\x9a\x81\xe8\x14\xa2\x88\xb3\x7b\xe2\x2b\x49\x33\xc3\xb4\x9a\xba\x88\xb0\xa2\x5f\x6a\xb9\xb4\xb4\x1c\x6a\x61\x6e\x34\x19\x18\x25\x26\x45\xa5\x42\xdf\xe0\xb5\x28\x5d\x8e\x2a\xd7\x41\x4f\x13\x6c\xe1\x2b\xad\x72\xe5\x5b\x57\x3b\x0f\xb2\x0b\xfd\x28\x3d\xf8\x38\x05\x54\xaf\x0c\xa7\xd4\xf4\x8a\x13\x3c\xc9\xfe\xe8\x84\x48\x42\x0b\x56\xed\x2a\x77\x17\xe0\x8f\x1e\xc6\x3e\xbc\x78\xfe\xf2\xa7\xc7\xab\x5a\xe6\x31\x53\xf7\xa8\x3b\xa4\x5e\x2f\xce\xfe\xc2\x14\x64\x1c\x25\x17\x48\xce\x84\xd3\x7b\x4c\xf7\xc6\xfc\xcf\xcd\x03\x70\xac\xa3\xaf\x4a\x1d\x51\x87\x56\xc9\x26\x42\x01\x61\x0f\x56\x8f\x8a\x22\x8c\xb8\xda\x18\x90\x84\x90\x29\xa5\x95\x66\x20\x6e\x6f\x0b\xe7\xe4\xaa\xd4\xda\x23\x85\x5b\x3f\x81\xaf\xfe\x54\x9d\x08\x12\x92\xc0\x80\x16\x73\x2a\xd1\x47\xdd\x1d\xd8\x1d\x27\xe2\x4c\xb2\x71\x3c\xe9\xeb\x89\x78\x87\xc4\x5d\x19\xce\x4a\x40\x5b\x0f\xc9\x3e\xa0\x6c\x07\x7a\xf0\x1c\x4f\x30\xc7\xd4\xc3
\x46\xd8\xfe\xa5\x50\x37\xef\x68\x77\x4a\xa9\xfa\x67\x57\xca\xb2\x91\x1a\x9f\xd5\x1e\x41\xe0\x40\x07\xa3\xc1\x03\x8b\x03\x3f\xef\xfe\x41\x02\x6e\xf5\xd4\x86\x98\xca\xbe\x6d\x31\xb2\x0d\xfa\xaa\xe3\xd2\xf4\x8a\xc3\x40\xb0\xfd\xec\x34\x69\xfd\x28\x41\xdb\xad\x64\x5a\x96\x3f\xdd\x2f\xcc\x9c\x95\x99\xee\x7b\x4e\x1e\x12\x0a\x21\x8a\x12\x9d\x2b\x01\xab\xf5\xdf\x3b\x3c\x3f\x80\x3d\xab\x96\x5d\x6f\x6f\xdf\xe1\xf9\xf6\xf6\x07\xd8\x83\xea\x61\xd8\xbf\x4f\xf3\x4d\x4a\x31\x48\x81\x84\x88\xa2\x29\xf6\x8f\x4c\xdb\xfe\x86\xfa\x53\xaa\xfe\xf5\xf6\xb6\xb5\x31\xb7\xb7\x3f\x94\x74\xf4\x9b\x32\x07\xac\xc5\xab\x4d\x03\x4d\xb8\x5e\xe9\xf5\x47\x0b\xd3\x65\x21\x1f\x94\xf6\x65\xae\xa6\xfd\x8d\x8d\xfb\x61\x53\xaa\x4a\x69\x73\xe3\x7a\xeb\x67\xdd\xfd\xa5\x2f\x46\x73\x97\xd6\xd7\x6f\xae\x06\x5d\xd1\x57\x82\x6a\xe6\xdc\xfe\xfa\xf9\x87\x86\x97\x33\x91\xf0\xd7\xcf\xdb\x2e\x41\xda\x7c\xd2\xd0\x51\x66\x60\xf9\x4a\x55\x55\x9d\x0d\xd5\x4e\xa9\x16\x93\xe3\xc8\x98\x74\x96\x55\x4a\x69\x1d\xc1\x5f\x98\xb3\xde\x18\x09\xad\xe3\xfa\xb8\x82\x6f\x16\x11\x73\x55\x3c\xfa\x88\x4f\x2d\xf6\xe7\xda\xff\x80\x93\x93\xc2\xfa\x91\xa7\x5b\xe6\xb2\x04\x9c\x9d\xfc\x69\xf5\x12\x59\xda\x29\x78\x07\x57\xef\x2c\x4f\xc1\xb5\xfc\xa9\xc4\x5a\x09\xfc\xa1\xd4\xa6\xa9\xf1\x93\x40\xc4\x84\x20\x63\x13\x2e\x96\x6c\x1b\xe4\x2f\x6c\x44\x93\xb2\x41\x0b\x4b\x58\xe6\x5d\xd2\x4c\x4b\x38\x60\x2a\x09\xc7\x72\x6e\x63\xc7\xb5\xa7\x87\x50\x9f\xdc\x13\x3f\x46\x81\x02\x99\x68\x90\xda\xbb\xe4\x7e\x29\x83\xd9\x9a\x82\xfa\xf9\x6d\x26\x27\x86\xeb\x37\x3d\xad\xc7\x12\x7a\x8f\x02\x25\x39\x7a\x6d\x45\x5d\x09\xd0\x5e\x13\x6d\x2e\x3a\x31\xbf\xcf\x5b\x1b\x8b\x66\x74\x0b\xc0\xf5\x26\xb5\x79\x2a\xfc\xac\x89\x16\xad\xfb\x35\x46\x76\x94\xfb\xce\x79\x49\x4b\x86\x63\x4d\xbc\x3b\x3c\x37\xbe\x96\x08\x29\xa0\xce\xb9\x95\x05\xa4\x69\x76\xdf\xde\xef\xa4\x68\x26\x42\xa2\x1c\x66\xc1\xf4\x59\x34\x69\x16\xcc\x98\x34\x19\x75\xc1\xd7\x63\x9e\x9c\xdb\xd4\x43\x11\x91\x28\x48\xdd\xa7\xda\x73\x5a\xf4\x93\xaa\xd1\x64\x7c\xa5\x25\x40\x13\xef\x69\x68\x15\x6f\x7b\xf1\xa8\x75\xf6\x18
\x78\x06\xb7\xd0\x56\x5f\x0a\x30\x9d\x1a\xcd\xf0\xa7\x8d\x78\x41\xcd\xa3\x79\x71\xc1\x5f\x64\x27\x77\x89\xc8\xbe\xdf\x13\x38\xd5\x6f\x36\x59\x66\x35\x30\x2a\x4c\x9d\xb4\x05\xf0\x38\x13\xde\x16\x45\x01\x71\x01\x6e\x44\x64\xd4\xf3\xb2\xb5\xd1\xd4\xb6\xd1\x90\x40\x8e\xa7\xe5\x5b\xec\xf2\x47\xf7\x97\x0a\x54\xfd\x8b\xed\xa7\xb9\x1a\x58\x6e\xbe\x33\xb3\x6c\x54\x06\x35\xdd\x78\x1a\x2b\x6b\x25\x3d\x40\xfe\x52\xa7\xf9\x7a\x32\x57\x3e\xcd\xb7\xad\xd7\x7a\x9a\x7f\x59\x83\x11\x54\x0b\xd3\xcb\x93\x97\x65\xd3\x98\x04\xee\x58\xd6\xac\x3b\x7e\xcf\x89\xcd\x6d\x61\x49\x59\x8b\x11\x53\xbc\x4d\x8b\x9b\x10\x49\x6f\x66\x1d\x72\x53\xfc\x11\x2a\xd3\xbb\x41\x6f\x56\x22\x55\x2b\x76\xb4\xd3\x49\x1f\xe1\x6b\x20\xd6\x15\xa5\xe5\x57\x3c\x99\x10\x8f\x60\x2a\x77\xab\x23\xc8\xba\x53\x6a\xa3\x7e\xad\x87\xf7\x7f\xaf\x3e\xf0\x5d\x81\xf9\x6b\xa0\x35\x72\x3f\x4a\x22\x79\x9c\x6b\xa4\xee\x2a\xbf\xb5\x8b\x80\x9a\x93\xef\x66\x09\x50\xb7\x22\xb0\x9a\x00\xa8\x99\x4a\xa8\xe6\xff\xd3\xd2\x29\x4c\x0e\x56\x52\x6f\x5d\xc7\x5e\x85\xf7\x13\xbb\xa1\xc5\x19\x6e\xd1\x2d\xef\x9a\x96\xbd\x52\xc1\x3c\x25\x4d\x2a\x96\xb4\xd8\x34\x5b\x86\x97\x50\x73\xa4\x51\x08\xa2\x6e\xb9\x86\x55\x74\x5d\x30\x9c\x8a\x53\x5f\x23\x62\x0a\x13\x73\x94\x07\x54\xf5\x72\xc3\x14\xd5\x43\xa9\x0c\x73\x4d\x5c\x89\x26\x8c\x72\xc6\x98\x50\xca\x1c\xe2\x53\x2c\x13\x7f\xa3\x8d\xb6\x58\xe4\xb3\x44\x2c\x85\x58\x22\x1f\x49\x64\xe3\x36\x1c\x50\x22\x00\x2b\xe6\xd2\x56\xad\xf5\x01\xeb\x40\x77\x9d\x41\x5b\x16\xc0\x41\x84\x49\x9f\x96\x85\x83\x8f\x25\x16\x0c\x6a\xe4\x58\x0f\xea\xec\xdb\x3a\x11\x96\x6b\x57\x6d\x52\x56\xee\x29\xed\xc3\x9d\xda\xda\xa6\x39\xea\xa9\xb4\xd7\x6b\xe2\x76\xdc\xf2\x1a\x07\x65\xdf\x81\x32\xaa\xcb\x0c\xdd\xe3\x8a\xa0\x1d\xc8\x84\x4a\x68\xe5\xa4\x4c\x4e\x2e\x25\x21\xff\x62\xb4\xf4\x76\xf8\xe5\xa6\xe2\x3f\xe5\x5e\xa0\x9a\x90\x0f\xed\x87\x7f\x98\x61\x9e\x39\x9c\x8e\x38\xf3\xb0\x10\x2e\xc4\xb3\x62\x67\xbd\xca\x59\x37\xe0\x33\x2c\xb4\x72\x86\x26\x13\xec\x19\x42\x37\x5c\x90\x39\x05\xb2\xf3\xdd\x87\x61\xf9\xf5\xf2\x31\x4d\xdc\xf8\xfb\xe9\x31\x81\x6d
\x3c\x21\x5c\xa4\x1c\xa9\xa7\xdd\xb9\xe2\x45\x39\xcf\x9b\xc7\x7a\x05\x1e\xb5\x44\x79\xcf\xf9\xea\x92\xee\x5d\x0e\xce\x8a\x82\xae\x16\x48\x21\xea\xd9\x4d\x97\x16\x6e\x44\xb8\x81\x14\xa3\x6e\x36\x29\x68\x4a\x9c\x3c\x3d\x9b\x19\xb0\xbc\xf4\x39\x2b\xf5\x39\xad\x28\x32\x4a\xdd\x4f\xad\x04\x86\x76\xe2\x98\x48\x3c\xb5\x87\xd8\x38\x3f\xaf\x74\x45\xcc\xa3\x5b\xb8\xf8\x28\xb5\x1b\x5b\x77\x78\x14\x61\xea\x2b\x3a\x46\xc0\x11\xf5\x59\x68\xac\x9b\x8f\xcd\xce\xa2\x82\xbb\x48\x29\x72\xbc\xe7\x21\x81\x73\x01\x77\xe5\x1e\x23\x1b\x48\x57\x0a\x55\x07\xd7\xd9\x24\xe1\x29\x49\x82\xb3\x0d\xd0\x85\xb0\xbb\x34\xc8\xce\x86\xd5\x95\xc2\xac\x0a\xb5\x7b\xa9\xb1\xf9\xf1\x1f\xb5\x2e\xa5\x25\x45\xa9\x57\x99\x23\xd7\xb0\x41\x15\x89\xa4\x2a\x91\x6d\x35\x3e\xad\x04\xd7\x82\xdc\x1a\xae\x51\x58\x7b\xbe\x12\x8a\x25\x13\x1e\x0a\x1a\x32\x0e\x5b\x25\xc0\x15\x2b\xe7\x14\x41\x37\x35\x5b\x61\x92\xdb\xf6\x51\xa5\x13\xa6\xcd\xb3\xb9\x88\xda\x30\x62\x01\xf1\xe6\x99\x90\xb9\x4a\x3b\x22\x71\x2d\x27\x7b\x9e\x53\x2e\xd2\x9d\xd2\xf6\x83\x81\x4c\x8c\xb3\x50\x9f\x57\x54\x1b\x38\x42\x6d\x90\x58\x56\x19\x65\x2b\x98\x64\xcd\x26\x2b\xd8\x31\xaf\xd3\xb0\xb2\xe6\x90\x86\x5b\xf3\x7e\x55\x7c\xba\x2e\xa6\x94\x59\x23\xbb\x28\xda\x77\x65\xf6\xb5\x1a\xa0\x90\x2e\xc8\xb9\x12\x9b\xb9\x38\x31\x61\x93\x18\x35\x54\x13\xf1\x45\x7c\x2d\xa2\x82\xc5\xf8\xc4\xc5\x67\xc7\xc4\xb6\xed\x6a\x47\x95\x76\xa1\xf5\x33\x69\x8c\x7b\x70\x9b\x8d\xdd\x34\xc1\x15\x28\x22\x42\x87\xf4\x79\x2c\x8c\x62\x89\x07\xf7\x2f\x06\x49\xd4\xd9\x75\x1a\x75\xf6\x21\x13\x75\x76\xed\x02\xba\x6f\x4c\x7f\x1f\x06\x99\xd9\xd0\xb3\x4a\xb0\x6a\xac\xa7\x45\xb5\xbd\xad\xc5\x7a\x2f\x13\xe7\xf6\x65\x7a\x3c\x63\x12\x67\x42\x62\xcc\xf2\x15\x0a\x69\x0a\xb3\xc3\x6a\x6c\xf4\x0a\xb8\x84\x9b\x5a\xc8\x06\xbd\x8a\x34\x02\xf3\xac\xe4\xb8\x70\xc4\x9b\x46\xb1\xd4\xf1\x43\x2f\xa1\xaa\x83\x04\xed\xc1\x61\x61\xc6\xea\x28\xdf\x9e\x5c\x1e\x2c\xc6\x82\xb9\x07\x53\x8f\xcf\x35\x47\xac\x5b\x40\x9f\x14\x20\x6f\x40\x3e\xb7\xec\xa2\x42\x3c\xa7\xad\x41\x60\x29\x09\x9d\xa6\x8e\x2b\xc7\xdb\x5f\x56\x42\x4e\x3d
\x7c\xe1\xbf\x0d\xc5\x5b\x3c\xaf\xd2\x4f\xdd\xb3\x9a\xa8\xfc\x35\xdf\x41\x5b\x99\x99\x17\x99\x47\x01\x8b\x7d\x78\xfb\x6e\x04\x77\x78\x9e\xa4\x65\xc7\x42\xab\xa3\xb5\xc4\x78\x71\x0c\x3e\x11\x77\x19\xa2\xd3\x13\x8e\x82\x20\x89\xd0\x4f\xa2\x79\x1a\x56\x00\xbe\x3c\x07\xea\x71\xdf\x85\x62\x70\xa4\x90\x67\x6f\x71\x2b\xce\x13\x38\x98\x9c\x12\x7a\x57\xc1\x7d\x7e\xc4\x08\x95\xeb\xe7\xbd\x2c\xdc\x8d\x70\x5e\x8b\x0e\x2a\x68\xe8\x82\x71\x39\x70\x23\xcf\xab\xa4\x96\xff\x6a\x4a\x04\xd4\x99\x46\x1b\xe3\x4b\x4c\x15\x98\x37\x52\x46\x0a\xf7\x43\xcf\xc3\x55\x05\xa8\xcd\x63\x26\x74\xcc\x58\x80\x4b\x23\xd1\x52\x74\x0b\xab\xb6\xd8\xcd\xf2\x2c\x3a\x9c\xe8\xe1\xee\x5b\xac\x75\xf0\x3f\x20\x0d\x2c\xcd\x13\x25\xf5\xdb\x5f\xc4\xb8\x14\xc0\x72\x9c\x68\x32\xb5\xf0\x47\x89\x39\x45\x01\x18\xae\x10\x7d\x38\xb6\x55\x7d\xaa\xef\xdb\x34\xcf\x04\x05\xa2\xa2\x8c\x8f\x9b\x8d\x15\xd8\x79\x66\x67\xab\xc5\x7a\x34\x94\xd7\x58\xbe\xc0\xc6\xaa\x52\xd8\xad\x70\xdd\xe2\x72\x8c\x7c\xa5\x57\x36\x4a\xb3\x3c\x15\x14\xb3\x47\x42\x14\xe9\xba\x0b\x8c\xcb\xec\x9b\x8d\x8b\xf5\xfe\xf2\x54\xf4\xe1\x0f\x12\x04\xc6\x29\xa0\x83\xbe\xa2\x38\xd0\xee\x67\x32\xb1\xb4\x75\xa3\x66\xff\x46\x01\xbf\x41\x4d\xd4\xaa\xcd\x0e\x35\x96\xb5\x92\xc0\xd4\xc3\x39\xb9\xb4\x3e\x11\xfa\x6b\x01\xf2\x06\x84\x68\xcb\x2e\x6a\xb6\x62\x31\x43\x1c\xfb\x70\x64\x74\x7e\x38\xa1\x53\x42\xb1\xb3\x35\x9d\x52\x53\x39\xdf\xa5\x7b\x2f\xfa\x4a\xba\x0f\xa1\x46\xb0\x0c\x2f\x34\xd9\xaf\x5f\xba\x0e\x73\x1d\x3c\x42\xae\xd6\xa9\x2b\xb5\x0c\xf0\x90\xb0\x93\x3e\x12\x70\x23\x86\xe1\x85\x12\x3d\x1c\x0b\xa1\xe4\xe9\xab\xb9\x2b\xfe\xbf\x9f\x24\x39\xd6\x82\x75\xd1\x0c\x1c\x2b\x41\xe4\xd9\x64\xc9\x52\xe8\xc6\x69\xa7\xf0\xa8\x85\xa8\xd1\xc3\xd1\x0c\x87\x98\xa3\x20\x95\xf9\x59\x50\x80\x84\x20\x53\x6a\xcb\x31\xd4\x57\x43\xc8\x26\x60\x6a\x77\xff\xad\xc3\xee\x86\x44\x37\x6a\x46\x6e\x13\xec\x95\x16\xe0\x21\xe3\x8b\xac\x05\x39\xc6\x56\x0a\xf9\x9a\x90\x45\x3c\xa6\x58\x3e\x30\x7e\x97\x7c\x6d\xff\xb6\xa3\x56\xcb\xc6\x26\x93\x5e\x9b\x95\xf2\x71\x84\xa9\x8f\xa9\x32\x47\x13\x83\x32\x29\x42
\xe3\xbb\x10\x2d\x2d\xf3\x74\x20\xec\x03\x91\x33\x16\xd7\x6d\x2e\x50\x3e\x8d\x1b\xd3\x6f\x05\xe6\xf7\x98\xbb\x9a\x7b\x0d\x8d\xdc\xa1\xe3\x37\xb4\x89\xbe\xb3\x28\xb7\x65\x64\x6d\xb9\xe4\xe5\xa4\x1b\x35\x60\x2a\x5d\x39\x12\xe4\x57\xfb\xcd\xc0\x04\x6b\xe6\xf9\x7f\x47\x60\x0c\xd7\x17\x19\x07\x83\xfb\x2d\x81\x9f\xe6\xda\x2e\x24\x60\x3a\x6f\x8d\xcf\x3c\x31\x10\x92\xa9\x39\xe8\x71\xac\xd0\xb9\x57\x1f\x1d\x88\x67\xce\x9f\x82\xa8\x7f\xe3\x3a\xb8\x71\xbf\xee\xd6\x84\x2f\xad\x48\x46\x96\x77\xd6\x6f\x82\x9e\x19\xc0\x2b\xba\xeb\x0a\x2b\xe8\x38\x5e\xf3\x63\xed\xba\x69\xa7\x9e\x92\x13\x21\xf2\x66\x66\x93\x0c\xc3\x98\x12\x97\xfe\x99\x1e\x47\x64\x32\x80\x14\x57\xd7\x42\x4d\x85\xce\x4d\xcc\x89\xde\x23\x28\x26\x3a\x99\xe0\x36\xf3\xc3\x2d\x50\xc6\xe1\x36\xff\x76\xbd\x6b\x8b\x08\x28\x1c\x61\x6e\x59\x56\xde\x4a\x46\xed\x52\xe2\x2c\xed\xd5\x16\x56\x33\x33\xa0\xb3\xc9\x89\x04\xfc\x91\x08\x99\x1b\x34\x82\xad\xa3\x58\x48\x16\xc2\x48\x63\xe9\xd6\x69\xab\xde\x51\xa9\x89\xff\xbd\x4e\x65\x1a\x25\xa3\x13\xad\x09\x3e\x6d\xb2\x5b\x6f\x3b\xa8\x85\x63\x3a\xf0\x3e\x09\x40\xd9\xd5\xe9\x67\x71\x10\x28\x65\x75\x1f\x22\xc4\x25\x41\xea\x8f\xe1\x3e\xa8\xfd\x60\xc6\x78\xbd\x3c\xd6\x9e\x8b\x4d\x78\x58\x5d\x8a\xf2\x34\x60\x63\x14\x0c\xec\xba\xad\xe8\x3c\xad\x00\xa6\x1a\xb8\xcf\x4f\xc8\x4b\x69\xf9\x73\xd0\xcc\xe5\xed\x1c\x24\xa0\x6f\x2d\xf0\xf1\xaf\x9c\xc5\xd1\xe1\x64\x42\x28\x91\x2d\x94\xc3\xc6\x5d\xaa\x28\x94\x8a\x5d\xb4\x69\xba\x06\x8d\x7f\x99\x7e\x2b\xd4\x52\x05\x02\x34\x0c\x70\x40\x8c\x3e\xc4\x02\xdc\x93\x98\x22\x5a\xcf\x05\x4e\xc5\xdc\x80\x22\x52\x1f\x02\x69\x9e\x5e\xba\xbc\x35\x6f\xb5\x31\x20\x20\x4b\x2a\xf5\xaf\xb5\xde\xbb\xa0\x86\x54\x1a\x9a\xd5\x95\x3f\xb3\xa5\xf7\x50\x76\x91\xe0\x5a\x21\xdf\x00\x14\x60\xaa\x57\xda\x31\x5c\x5b\x41\x9b\x30\xf2\x40\xa9\xd9\x4a\x72\x25\x33\xd5\x20\x79\xc1\xd5\x1c\xcb\x7a\x82\x5c\xf8\x81\x2b\xbd\xc2\x68\xa9\x2c\x6e\x04\xec\x64\xb5\x42\xc6\x8e\x6c\x63\x42\xf9\x2f\x46\xb1\x18\xc4\xa2\xe7\x29\x6d\x0f\x05\x2f\x7a\x28\x33\x09\xfa\x63\x4f\xa3\xd0\x7b\x51\x2f\xa9\xa1\x4e\x5a\xaf
\xbb\x97\x5c\x83\x3a\x39\x0f\xab\xb3\x29\x2c\x29\xef\xa1\x4a\xe6\xb7\xe4\x8c\xf6\x92\x3f\xe2\xe4\x1e\x49\x3c\xbc\xb8\xff\xf9\x57\xbd\xdc\x6d\x5d\xaf\x4b\x1f\x21\x97\x77\xf4\x85\x76\x81\x8a\xde\x4f\x68\x1c\xae\xa8\x26\x2b\xa4\x94\x84\x51\x20\x9d\xef\x57\x7b\x7b\xda\x1c\xab\x00\x5c\xb8\xfc\x4e\x1b\x67\x0e\x17\x97\xc3\xdf\x0f\xaf\x4e\x6e\x86\x17\xbf\xff\x7c\xf3\xeb\xf9\xf9\xaf\xa7\x27\x37\x87\x47\x47\x27\xa3\xd1\xcd\xfb\xb3\xd1\xc5\xc9\xd1\xf0\xf5\xf0\xe4\xb8\xae\x14\x20\xc0\xf0\xec\xcd\xc9\xe5\xf0\xea\xe6\xf5\xe5\xf9\xbb\x9b\xd1\xfb\x57\x67\x27\x57\x7f\x9c\x5f\xbe\xdd\x87\xf3\xf7\x57\xaf\xce\xdf\x9f\x1d\xef\xc3\xab\xe1\xf1\xf0\xf2\xe4\xe8\x6a\x78\x7e\x76\x78\xba\x01\xad\x06\xd3\x38\xac\xdf\x8c\xda\x0d\xb4\x16\x44\xc5\x38\x6b\xdb\xb8\x29\xa8\x7d\x29\x37\x3b\x95\x6f\x72\xac\x2c\x7f\xad\x21\x6f\x4e\x5f\xba\x5c\xec\xe4\x0b\xf1\xca\x72\x3d\x57\xe8\x4c\x19\x20\x79\xa5\xc9\x63\x54\xc4\x61\x93\x4e\xf0\x1f\x05\x25\x3b\xd1\x1b\xd0\x9d\xda\x6a\x3c\x06\x63\x9c\x19\xd1\x95\x9a\xe4\x8d\xa9\x3f\x47\xa5\xfd\xb5\x85\xb1\x19\x02\x28\xc7\xa9\x41\x7c\x42\x9d\x08\xb5\xe2\x33\xb3\xc4\xb6\x0a\xb5\xea\xa7\x71\x7b\x5b\x10\x9f\x57\xff\xbe\x38\xc9\x09\x4a\x38\x3b\xbf\xb9\x3c\x19\x9d\x5c\xfe\x7e\xa8\xd8\xb9\x5e\x70\xaa\xe7\xf0\xec\xdf\xb9\x06\x60\x61\x1d\x65\xbf\xdd\xa0\x7e\xd0\x24\x39\x41\x4b\xa8\xe2\x38\x1b\x1b\xe4\xa7\xa1\xf1\xf5\xc2\x2c\x34\xbe\x5f\x36\x49\xb5\x8d\xee\x70\xad\xb0\x84\x47\xb1\x4e\x53\xc0\x40\xa5\xb8\x3a\x62\x9c\x63\x11\x31\xea\x27\x57\xfd\xe9\xb4\x01\x85\x6e\x23\xe5\x14\xc8\xd8\xa9\x6d\xf5\x0a\xc0\x23\x68\xa5\x29\x35\xcd\x3c\x6d\x12\xd4\x1c\x22\xcb\xa4\xa9\x99\x67\xd9\x79\xac\xad\x42\x9b\x3e\x5f\x7a\x2a\x97\xce\x73\xcb\x36\x6a\x9b\xed\x66\x9e\xc6\x9c\x37\xf3\x2c\x41\xfc\xed\xd3\x4b\xcd\xa3\xe6\x95\x78\x4a\xf5\x65\x31\xad\xa8\x11\xbf\x14\x12\xc5\x62\xbc\x39\xf8\xcb\xeb\x0c\x4a\xad\xbe\x4e\x8a\x75\x5b\x6c\x95\x6e\xad\xc0\xd5\xd8\xe1\x2e\xe2\xd1\x18\xe2\x1e\xa3\x1e\x8e\xa4\xfe\xa0\x0f\x93\x08\x9d\xba\x13\x29\x31\xb0\x50\x7b\x16\xaa\x78\x66\xbf\xb8\x71\x5f\xdc\x10\x7a\xe3\x00\xd6
\xdb\xed\x3b\x28\x10\x0c\xb4\x7b\xf6\xf7\x77\x3a\x78\x10\x2e\x02\x44\x31\x10\x1f\x53\x49\xe4\x7c\x79\x94\x09\x4a\x7f\xea\x45\x9c\x50\x8f\x44\x28\x10\xcf\xee\xc3\x9b\x02\x9e\x1a\xc7\x9b\x48\xf5\x77\xe3\xfa\xdb\xad\xc7\xd7\x05\xdd\x26\x33\xec\x7c\x0d\xbf\xbf\xcb\x1c\xb6\x48\xe6\xac\x19\x63\x28\xd5\x82\x34\x71\x6a\x17\x01\x92\xba\xfa\xa9\xc5\x51\x18\x2f\x3d\x93\x45\xff\xfa\xb5\xb5\x63\x6b\x61\xda\x73\x07\x77\x3b\x7b\x7b\x32\xc8\xb9\x63\xcc\x18\x16\x97\xdb\x82\x2d\x4e\x67\xfd\xcc\xd5\xa4\x25\xb9\xe7\x0b\x3b\x81\x87\x28\x1c\xb4\x66\xb7\xc4\x1b\x80\x43\x44\xaa\x5d\x46\x79\xf1\x30\xf2\x58\xd4\xe6\xf2\x9b\xa6\x0d\xa6\x56\x46\x98\x4e\x56\xb4\xc0\xdd\xad\x55\x05\x0a\x01\xd1\x04\x14\xec\xe1\xb2\xbd\x05\x47\xd7\x6f\x2d\x9c\x7a\x25\xfc\x60\xba\xaa\x2b\x5c\x94\x3e\x63\x24\xcc\x5d\xbe\x0a\x29\x8d\x84\xa9\x0a\xf8\x80\xe6\x22\xe9\xec\x00\xf6\xa0\xc6\xc9\x86\x62\x39\xb3\xa4\x1d\x0b\xcc\x1d\xdd\xf6\x39\x46\x7e\xe3\x79\x7d\x33\x64\x1f\xdf\x0b\xc9\x38\x9a\x62\x0d\xf1\xe6\x81\x93\x06\x76\x6c\x86\x69\xef\xbb\xea\x6b\x58\x86\xf3\xeb\x4f\xf4\xcc\xd4\xe8\x62\xa5\x79\xf9\x90\x96\xe9\x73\xf7\xd8\xe9\xb7\x94\x9c\xad\x85\xe8\x4a\x95\xb6\x99\xdb\x31\x99\x9a\x4a\xf9\x7b\xf5\xf1\x19\xcd\x60\x34\x87\xf7\x91\x1f\x12\xda\xd7\x9f\xd7\x05\x52\x1f\x98\x2f\xb5\x96\x93\x38\x08\x6e\x3c\x46\x25\x67\xc1\xc6\x8e\xaa\x96\xd0\x91\x96\xd5\x8f\x5a\xe8\x46\xad\xf5\xa2\x25\x75\xa2\xe4\x7c\x74\x03\xfa\x50\x02\x7b\x3d\x47\xf1\x99\xf8\x9b\x86\xd8\xc2\x31\x5e\xe9\x34\xbe\x16\xa6\xce\x92\xca\x1d\xc5\x37\x9e\x0d\x57\xe7\x44\xb8\x67\x93\x67\xc3\xb1\xe8\x61\x24\xe4\x8b\xec\x19\xb8\xfa\xfc\xfc\x91\xc7\xc4\xab\xc3\xd5\x2f\x3d\xc1\xa3\xe4\x56\x84\xda\xfe\x4c\x41\xa2\xaa\xab\xfb\xed\xef\xab\x28\x0e\x57\xa8\x26\xcc\xb2\x39\x10\x49\xe1\x64\x83\x8f\x16\x02\x8c\xea\xb5\x7c\xad\xe0\x5f\x21\xbd\xc7\xa5\x6d\xda\xea\xa1\xda\xfa\xed\x31\xde\x93\x68\xda\x73\x33\x2f\x9e\x29\x74\x36\x10\x52\xb4\x0e\x51\x2d\x6a\x2f\x47\xfa\x7a\x92\xba\xaa\xec\xc2\x92\xdd\xb6\x2b\xc3\xe0\x9e\x3a\xe9\x9c\xa9\xc7\x20\x17\x08\xae\x4d\x40\x40\x72\xc8\xab\xb3\xf7
\xb0\xdf\x87\x73\x1d\x1c\xac\x8f\x7c\xf5\xd1\x3e\x16\x72\x1f\x88\xae\x06\xe0\x4e\xfa\xeb\x0d\x15\x13\x29\xbb\x65\x22\x59\xb6\x5c\x8e\x9d\x52\xca\x98\x29\x53\x62\xe2\x8f\x29\xa3\x3d\xf3\xce\x32\xb9\x7a\xfb\xb6\x80\x83\x51\xb7\x35\xf2\x11\xf1\xee\x00\x99\xa9\x70\x61\xba\x89\x0b\xa8\x69\xfd\x0b\x2c\xea\x90\xd5\x93\x30\xc5\x32\x9d\x01\x33\xd3\xaa\x3f\xa3\x4c\x37\xed\x7d\xf6\x32\x8f\xb6\x3b\x94\xda\x85\x9a\x1c\x6e\x1b\x38\x28\xbf\x56\xff\xac\x9a\xf0\x99\x03\xa1\x5e\xcb\x1e\x88\x4f\xd6\xbb\xc9\x10\x4a\xd4\xcc\x91\xbf\xcc\x91\x80\x0e\x20\x6e\xb8\xfd\xaa\x4e\xb8\x2f\x04\x8d\x97\x40\xaf\x68\x5a\xc5\x8e\x47\xe6\xa6\xaa\xec\x6d\x77\xc0\xa8\x29\xdc\xa3\x23\x1f\xd0\xa4\x2e\x1e\xd9\x46\xf3\x13\xa1\x94\xa4\x28\xc0\x9a\x17\xb3\x41\xe2\x06\xa6\x2e\x11\xac\x55\x16\x1e\x53\x5d\x48\x08\xd5\x86\x39\xbb\x90\x68\xb5\xb1\x62\x13\x82\x21\xcc\x25\x7c\x1e\xa2\x20\xb1\x90\x9a\x15\x7d\x5d\x2e\xfd\x96\xb3\x00\xdf\x26\x01\xad\xd5\x9b\x2b\xd3\xbd\xeb\x3b\xee\x1c\x52\xa6\xd6\x9a\xc1\x06\x74\xa5\xf5\xa4\x43\x7d\xbf\x94\x98\xb1\x87\xea\x30\xfe\x31\x0e\xd8\x83\x2d\x96\x7e\xeb\xc5\x3c\xb8\x85\x9d\xb9\x45\x52\xbb\xb8\x62\x81\xe1\xf6\x61\x8a\xe5\xed\xee\x01\x5c\x9e\x9f\x9e\xfc\xf2\x3f\x3b\xea\xbd\x4a\x88\xbd\x37\x49\xec\x70\xef\x75\x80\xee\x19\x3f\x30\xce\x24\x6d\xd3\x1c\x0c\x06\x6e\x94\x8e\x55\xdc\xdb\x8a\x65\xdc\x16\x3b\x40\x52\x72\x32\x8e\x25\x16\xa9\x4f\x4c\x4d\x52\xb5\xaf\x86\x4c\xe0\xfa\x1a\xb6\xfe\xe7\x93\x42\xf2\xf3\x16\xfc\xf2\x0b\x6c\x6f\xbf\xd3\xd3\xb2\xbd\x0d\x1f\x3e\xfc\x53\x09\x28\x0a\xfd\x7e\xdf\x4d\x56\x63\xaa\x16\x32\xf4\xa8\xdb\xe0\x40\x60\xfd\xc1\xce\xaf\x6b\x9c\x7b\x69\x42\xaa\x78\x6f\x85\x1d\x7d\xa9\xdd\x7c\x19\xa3\xab\x61\x1b\x6f\x75\x64\xfd\x98\x23\xc7\x65\x98\xbe\xed\x49\x6d\xca\x0f\xaf\x49\xd0\x70\x3e\xbb\xbc\x8e\x62\xf3\x07\x73\x7d\xd4\xb6\xac\xa8\x15\x62\x5c\xa6\x23\x63\xb8\xbb\x30\xb5\x14\xf5\x06\x97\xff\x84\x04\x0d\x87\x21\xab\xc6\x6d\xb8\x9b\xbb\xaf\x48\x88\x59\x5c\x7b\x3a\xf0\xd8\xe9\x4b\x7b\x69\x3f\x81\x99\x92\x1f\xa1\xf6\xf2\xb1\x89\x2d\xce\x96\x4a\x41\x7d\x3b\x50\xbd\x61
\x0c\x89\x84\x4f\xb2\x2a\x95\xd8\x7f\xf1\x1c\x42\x42\x95\xa8\x31\xfa\xfe\x6f\xa3\xf3\x33\xe0\xd8\xea\x11\xcd\xd5\x24\xf4\xc1\xd5\xf5\xb1\x4d\xb1\x4d\x6d\x03\x1f\xdf\xe3\x40\xd1\xae\xc8\x1a\x08\xfa\x92\x0e\x8f\x05\xbd\x71\x3c\x99\x60\x2e\x8c\xa5\xa0\xbf\xfd\xf1\xd9\x9f\x82\xd1\x7a\xcb\x00\xb4\xe3\xdd\x56\x9b\x51\xba\xa2\xce\xea\x45\x24\x10\xb6\xfe\x8f\xda\x1a\xa2\x00\x51\x24\x19\x9f\x03\xe6\x9c\x71\x08\xb1\x10\x68\xda\x44\x5e\x3b\x72\x96\xbf\x50\x2a\x33\xbb\xa6\xe6\x1e\x32\x65\x67\xd5\x4f\x1a\x2e\xa2\xf5\x1a\x29\x58\x30\x1e\xc6\xbe\x6a\x68\xd6\x1d\x22\xcc\x09\xf3\x77\x4d\x35\x95\x5c\x37\x44\x34\xdc\x4f\x0b\x99\x45\xd4\x75\xfa\x30\xf5\x93\xfb\xaf\x72\xe0\xd7\xcc\x28\x01\x99\x60\x6f\xee\x05\x78\xdd\x79\x90\xa7\x79\xc0\x4d\x8d\x56\x10\xb7\xed\x7a\xa8\x60\xb8\xa4\xb1\x4b\x77\xfc\xca\x25\x1c\x50\x2c\xd9\x31\x56\xcb\xaf\xa4\xc8\x1a\x4c\x42\x77\x87\x98\x8f\x24\xee\x29\x12\x6a\x6f\x3e\x1e\xe6\x70\x59\xed\xc8\x53\x8b\x31\x5d\x91\xbe\x68\x17\xaa\x91\xf6\x7c\x0d\xbe\x9e\xcb\xaa\x84\x96\x16\x4b\x0a\x33\x21\x51\x18\x6d\x58\x2e\xad\xb4\xf3\x64\x16\x53\x06\xeb\x37\xef\x0f\xb3\xe0\x57\x5b\x1e\xc5\xf6\x7a\x89\x7c\x57\x41\x81\x4d\xda\xc5\x78\x5e\x95\xc4\x73\x67\x97\xd4\x15\x19\x4d\x04\x18\x11\xad\x24\xd7\x3b\x42\x75\xe9\x7f\x53\xbb\x3b\xb7\x7d\xfd\x33\xb9\x17\x20\xfd\xf1\x27\xf0\x9b\x6c\xe6\x5a\xfa\xf9\x42\xdb\xda\x4a\xe4\x43\xfc\x00\x6f\x90\x7c\x86\x59\xf0\xab\x91\x4f\x42\x35\x92\xc1\x1d\xc6\x51\xeb\x54\x66\x14\x90\x7b\x25\x17\x88\xda\x14\x7d\x5d\x3d\x6b\x47\x4b\x09\xca\xec\x15\xad\xc6\x10\xa5\x84\x4e\x77\xfb\x70\x81\x84\x68\xd2\xc6\xcc\xdd\xb0\x33\x8e\xc5\x8c\x05\x26\x49\xd9\xec\xe8\xb9\xd4\x03\x73\x38\x6a\x69\xb4\xef\xa8\xad\xc9\x41\x62\x88\xed\x1f\xcd\x84\x98\x12\x5b\x83\xcb\xe9\x9b\x23\xc4\x91\x44\x5c\x7e\xfd\x3d\x69\x98\x45\xa5\xa6\xd9\x9a\xea\x5f\x2c\x6e\x5f\x63\xec\xe9\x2b\x5e\xfc\x06\x73\x66\x47\x5f\x8c\xc7\xb1\x87\xa9\xd4\x77\x60\x4f\x08\x25\x62\x86\xfd\x5d\xed\x3a\xb1\x60\x70\x40\xa6\x3a\xd4\x53\x69\x1d\x9a\x2a\x9b\xb4\x71\x3f\xd6\xe5\x92\x54\xff\x14\x8b\x6a\x95\xfe\x69\xef
\x8e\xc6\x43\xb1\x6e\x5d\xf3\x1d\xda\x68\xbd\x8d\x16\xe0\xeb\xca\x5e\xd5\x16\xd9\xa8\xad\x7d\x25\x75\x25\x16\x4d\x7e\x49\x72\xf8\x86\x4a\x6d\x2c\x9d\xfc\xdf\x4a\xa9\xf5\x3c\x1c\x60\xae\x4c\xb6\xf5\x9f\x9d\x1d\x66\x80\xaf\xb6\x8b\x15\x56\x26\x83\x6d\xbe\xba\x54\xfd\xf9\xa1\x31\x1b\x44\x36\xbc\x66\x73\x27\x60\x4b\xd4\x66\x78\x4a\x11\x0e\x8d\x79\x22\xeb\x63\xd0\x96\x64\xd1\x3e\x53\x22\x47\xc6\x47\x4d\x41\xa6\xd9\x51\x13\x2a\xf1\xb4\x41\x21\x82\xcc\xb6\x48\xa8\xfc\xf9\xa7\xc6\xb7\xab\xd9\xe0\xa8\x31\x64\x0e\xca\x0e\x95\x4d\x79\x66\xe7\x6d\xc8\x71\x01\xe2\xbe\x28\xbf\xce\x75\x61\xc4\x5a\x0b\x9b\x47\x18\xf0\xc7\x88\x89\xcc\x05\x54\x49\x09\x97\x16\x23\x5b\x39\xe0\x39\x83\x75\x9b\x3c\x96\x55\x23\x92\x17\xa6\xbc\x45\x12\x4b\xf1\x80\xe7\xf5\x63\xc3\x4b\xcc\x53\xb2\x5c\x7a\xfa\x93\x7a\xb2\xfa\xd4\x48\x2d\x85\x5b\x89\x16\x40\xd3\x72\x3b\x23\x9c\x46\xb6\x3a\x01\x59\x18\x77\xeb\x70\x81\x34\x8b\x78\x8c\x25\x1a\x14\xd6\x4a\x34\xfa\x05\xd5\xb3\xec\x31\xa5\xee\xa9\x21\xd7\x56\x07\xbc\xf4\x16\x11\x1a\xd0\x7b\xe2\x13\xd4\x93\x58\x04\xa8\x77\xf7\xbf\x0d\x31\x30\xe6\x69\x4e\xed\x5d\x77\x77\x0b\xcd\x60\x6f\xef\x30\x96\x4c\x1f\xff\xc3\xc9\x47\x0f\x6b\x92\xdb\xdb\x3b\xa8\xba\xee\x20\xff\xcc\x59\xac\xcd\xaf\xf4\x86\xe1\x24\xa8\xfa\x3a\x85\x9b\x08\xe2\x35\xc5\xae\x6b\xdf\x81\x9a\xa3\x67\xba\xdf\x1b\xf5\xf7\x8d\xfa\xfb\x26\xb9\xe9\xa6\x39\xd3\x1c\x60\x82\x91\x8c\x39\xde\xd7\xa3\xd0\x95\x97\x9c\x05\x98\x39\x12\x5f\xe4\x99\x36\x92\x2d\xcb\x55\xfb\x7a\xbf\x4f\xee\xff\x5c\x5c\x83\xda\xd2\xbf\xe6\x59\x49\xcc\xf9\x44\xdc\xd5\xab\xcd\xb0\x72\x56\xe6\x71\x02\xbb\x4d\x9b\x47\xee\xcf\xad\x3a\xab\x50\xda\x54\x5b\x60\xa6\x00\x6c\x41\x8f\x7e\x2a\x3a\x57\x5b\xad\x62\xcc\x98\x54\xc3\x19\x91\xbf\xf0\xaf\xe3\x76\x69\x4f\xed\xf4\x89\x65\xb4\x89\x02\x21\xbc\xca\x21\xd5\xd0\xb6\x62\x8d\x54\x5b\x65\xa4\xfc\xfa\xca\xb1\x9b\x1a\xaa\xa9\xdf\xbb\x63\x67\xb1\x91\x45\x88\x80\x7f\x3c\x7f\xfe\xeb\xab\xc6\xad\x61\x65\x95\xc1\xcd\xff\x66\xb3\x5e\x5f\x65\x7a\x59\x66\x3a\x4b\x72\x4a\x57\x9f\xc9\xad\xc8\xef\xa9\x6d\xdd\x47\xdc
\xdf\xda\xb5\x17\x87\x26\x39\xa6\xea\xd7\x31\x0a\xd4\xae\xef\x6f\xc1\xce\x05\xe6\x82\x08\x89\x5b\x28\x92\x9a\x1b\x5f\xd9\xa6\x30\x62\x0a\xea\x48\x22\x89\xe1\x98\x93\x7b\xbc\xbb\x6f\x7a\x16\xab\x80\x2d\x83\xc6\x78\x7e\x28\xcb\x43\x7d\x83\xb8\x6f\x3e\x19\x98\x56\xcd\xd1\xdf\xc8\x65\x34\x1a\xb5\x06\xe2\x99\xfa\xbf\x16\x87\x62\xb7\x51\xec\xaf\x4c\xa8\x34\x0e\x4f\x99\x87\x82\x91\xf0\x5b\x66\x47\x6e\x5c\x4c\x9c\x65\x50\x5a\x4d\x48\x9c\x25\x26\x07\x92\x12\x79\x33\x45\x3f\xa3\x63\xb1\xaf\x6b\x05\x37\xae\xe5\x73\xa5\xd1\xfe\x94\xf0\x80\xa2\xf1\xe7\xbb\x3a\x1e\x50\x01\x49\xea\x5a\x3a\xd0\xfb\x8d\x37\x32\x80\x15\x08\x86\xb7\x48\x7a\x0d\xa9\x90\xcc\xf8\xa3\xb5\x3f\x30\x60\x53\x73\x99\xf8\xf5\x9b\xe3\xd7\xa3\x94\x56\xcc\x85\xe9\x7d\x14\xa9\xee\xfa\x8c\x4f\xad\xda\xfb\xa2\xff\xb2\xff\x62\x30\xf3\x27\xe2\x26\x16\x98\xdf\x4c\x63\xe2\xe3\xfe\x4c\x86\x41\xb3\x46\x63\xae\x72\x1b\x4e\x40\x5f\x40\x65\xab\x98\x25\xc3\xcb\x0e\x8d\x08\x87\x60\xf3\x18\xe3\xe0\x4e\x43\x36\x65\xe2\x38\x46\x3e\x20\x8f\x33\xa1\x6f\xa7\x0e\x4d\xbd\xcb\x9c\xa0\x69\x04\x69\xaf\x00\x12\xb6\xdc\x2f\x12\xc4\x73\x5b\x73\x52\xd3\x30\x08\xb0\x0f\x63\x42\x11\x27\xf5\xce\x11\x58\x4f\xd6\xeb\x12\x1b\x36\x09\xd1\x74\x03\x01\xb8\xc3\xb0\x3e\x2c\x61\x89\xfc\x08\x8d\x61\xa2\x7f\xd6\xce\x40\x92\x1d\xe1\x7c\xd6\x85\x84\xaf\xf7\x97\x43\x1d\xff\x96\x38\x8c\x01\x51\x03\xbf\x16\x2c\xe3\x16\x89\x09\x0a\x49\x30\xef\x9b\xd1\x39\x25\x78\x1d\x16\x99\xad\x51\xa7\x7b\x11\x83\x6b\xfd\x6f\x6f\xf5\x7b\x43\x2a\xc1\xa9\x26\xee\xaf\x7a\xd8\xc3\xcc\x80\x93\x91\xf6\x53\x83\x48\x9f\x32\x39\x0b\x23\xe3\xf1\xaf\x05\x6a\x67\x91\xb3\xd0\x24\x86\x69\xe8\x6b\x9f\x3e\x03\x76\x70\xed\xe9\xb2\x8c\x3d\x33\x5e\xf3\xa5\xa6\xce\xf5\xcc\xea\x23\x7b\x19\x1a\x85\x46\x51\xa4\xb9\x55\x28\xcd\x99\x23\x32\x39\x57\x26\x74\x82\x39\x57\x44\xad\xe6\xec\x76\xc4\x26\xf2\x01\x71\x1b\xef\xd1\xd7\x7d\xde\xdc\x2b\x1d\x80\xd1\xfa\xee\x6c\x68\x87\x98\x0b\x89\x43\x17\x01\xfb\x94\x6e\x69\x71\x49\x2b\x4d\x82\xa3\x7d\xbe\x8a\xe3\xfd\x33\x14\x6e\x20\xe3\x75\x98\x85\xbe\xf9\xe3\x38\x77
\xe3\x72\x72\x0a\xa2\x2f\x4b\xea\xb7\x8b\xf3\xf7\xb1\x52\xf6\xf4\x16\x67\x2f\x59\x32\xe4\x64\xa5\xe4\x8d\xfa\xee\x56\xd9\xf2\x71\x98\x94\xc8\x15\xb7\xf5\xb5\x55\xdc\x46\x99\x20\xa4\x6b\xad\x3d\x15\x23\xf4\xdb\xc9\x54\x84\xa5\x6b\xc8\x95\x72\x8d\x5d\x85\x26\x95\xb7\x2d\xeb\x88\x0b\x8e\x71\x18\x49\x32\xae\x0f\xb9\x5d\xb5\xa2\x7c\x16\xfe\x66\x99\x67\x64\xe5\xaa\x70\xc5\x18\x33\x0e\x79\x43\xb3\xb5\x73\x96\x68\x77\x51\x8a\xf0\x06\xcf\xb9\x6c\x56\x67\x93\x4d\xbe\x62\xe1\xef\x14\xf8\x7a\x34\x33\x97\x83\x2a\x9b\x8c\xfb\x3a\xbd\xac\x26\x99\xa7\x16\x66\x3e\xd1\x67\x73\x55\x2f\x9d\xd3\x3a\xb3\x34\x62\x40\x5f\x24\x56\x78\xef\xe5\xa3\x72\x7d\xd6\xd3\x43\xfe\xed\x3a\x37\xb8\x73\x72\x37\xac\xd7\x37\xe5\x00\x5f\xc6\xf5\xdd\x9a\x66\x2b\x1d\xde\xb9\xa9\x5e\xbf\x0a\x65\xef\x07\xd6\xf5\x40\x37\xe5\xef\x7e\xb7\xd0\xc7\x17\xf0\x7b\x2f\xd5\xe9\x9a\xd4\xa6\xcc\xe5\xa1\x4b\x24\x72\xba\xdd\xd4\x56\x87\x36\x88\xdb\x1b\x9c\xcd\xf2\x08\xb3\x91\x18\x9d\xc7\xdc\xcc\xd1\x70\xbc\xa8\xbd\x03\x89\x1c\x2c\xdb\x4d\x0c\xb4\x6f\xce\x8f\xef\xd0\xd7\x73\x65\xa7\xaa\xe9\x0e\x3e\x78\x94\x4f\x79\x58\xd1\x63\x03\x94\xd6\x24\x05\x8d\x64\x95\x15\x28\x79\x62\x69\x74\x17\x39\x62\x72\xf7\xb5\x35\xeb\xcd\xf0\x18\x9f\x90\x5b\x1d\xc7\xad\x5f\x66\x65\xb2\xbd\x7d\xe5\x55\x71\xa8\x34\x2e\x4c\xc2\x9b\xaa\xb9\x95\x55\x85\xc5\xdd\xc8\x2a\x85\x84\x1e\x45\xb1\x2b\xb6\xb5\x01\xad\x2f\x07\x7f\xf9\x23\xc7\xac\x02\x8d\x15\xb6\x3a\xfc\xd7\x8b\x62\x88\x9a\x61\x26\xd1\x61\xc5\x99\xb4\xe7\x0e\x4e\xbb\xe8\xfd\xbf\x24\xfc\xfd\xe8\xe2\x7d\x52\x7a\x6c\x15\x05\xc3\x1e\x52\xb8\xd4\xd2\x90\xd0\x9e\x17\xc5\x9b\x2c\xca\xb0\x84\x58\xa5\x71\xe8\x26\xa2\x85\x4b\xa2\xf9\x1c\xa3\xed\x19\xc6\xe2\xf9\xc5\xb0\x45\x99\x8c\x9a\xc8\xc1\x34\x64\x2a\x57\x66\xce\x14\x0b\x68\x25\x91\xdc\xe6\xf9\x9a\x71\xb8\x7e\x73\xe8\x4c\x83\x0f\x3b\x4b\xe9\x8e\x33\x32\x9d\xf5\xd0\x3d\x22\x01\x1a\x93\x80\xc8\x79\xbd\xa6\x78\x6d\x82\x49\x6f\x0c\xac\x0f\x3b\xcf\x5e\x0f\x4f\x4e\x8f\x47\xfd\xdc\xd7\xbb\x76\x27\xde\x87\xbd\xbd\xca\x9b\xff\xdd\x33\x36\xb5\xc1\x24\x83\x1f\xf7\xf6
\xcc\x68\x9c\x7a\x98\x58\x3b\xed\x7a\xad\xed\x26\x8f\x51\xa6\xd7\x17\x7b\x7b\x6b\x27\xed\x44\x3b\xd1\x33\xba\x89\xa2\xf1\x59\xf8\x5f\x40\xf5\xcc\x77\xb8\x62\x79\xf8\xbc\x20\xcc\xcf\x51\x53\x88\x60\x89\xa7\xcc\xa4\x91\xd8\x03\x3c\x93\x4d\x61\xea\x3a\x69\x92\xd1\xb7\xcd\xe9\x7c\xf4\x16\x64\x01\x44\xc0\xed\xd9\xf9\xd9\xcd\xc5\xe5\xc9\xc9\xbb\x8b\xab\xe1\xab\xd3\x93\x5b\xab\x96\xba\x1e\xbc\xa4\x2e\x94\x37\x43\x74\x5a\x5f\xf6\x10\x2a\x90\x13\xd8\x63\x8a\xb4\xe7\x59\x96\x17\x70\x9b\xeb\xb7\x16\x6e\x49\x81\x7c\xdb\x74\x78\x3a\xbc\xfa\x77\xb1\xd6\x73\x6e\x48\xf5\x0e\xc9\xcc\x8b\x5f\xa9\x00\x7e\xd5\x40\x6a\x9b\x15\xc6\xd8\xae\x8b\x8a\xf7\x92\xe5\xf9\x43\x53\xce\xba\x93\x0c\x46\x65\xe0\x9b\x9a\xae\xc0\xb9\xcb\xf4\xb3\x99\xb4\x83\xf4\x2a\x3e\x57\x14\x62\xe3\x37\x7d\x76\xe9\x07\x5d\xfa\xc1\x37\x9b\x7e\x50\xca\xb1\x5d\x1e\x42\xc3\xd3\xe5\x21\x40\x97\x87\x50\xf1\x74\x79\x08\x5d\x1e\xc2\xb7\x76\x0c\x63\x9e\x2e\x0f\x01\x9e\x6e\x1e\x42\xe9\x46\xdd\x25\x24\xe8\xa7\x4b\x48\xa8\x7b\xba\x84\x84\x2e\x21\xa1\x4b\x48\xd0\x4f\x97\x90\xb0\xf0\x74\x09\x09\x5d\x42\x42\x79\xd3\x2e\x21\xa1\x4b\x48\xe8\x12\x12\xba\x84\x84\xe2\xd3\x25\x24\x54\x03\xec\x12\x12\xba\x84\x84\x2e\x21\x21\xfb\x74\x09\x09\xee\xe9\x12\x12\xba\x84\x84\x2e\x21\xe1\x89\x7b\xc2\xbb\x84\x84\xa7\x9a\x90\x50\xea\x00\xef\x32\x13\xba\xcc\x84\xcc\xd3\x65\x26\x74\x99\x09\x5d\x66\x42\x97\x99\xd0\x65\x26\x94\x3c\x5d\x66\x42\x97\x99\x90\x7b\xba\xcc\x84\xb5\xe8\xa0\x5d\x8a\x42\x97\xa2\x60\x9f\xef\x3c\x45\x21\xe6\x44\xce\x37\x90\x9b\x90\x81\xdb\xd4\x66\x35\xa6\x6d\xee\xa0\x4a\x51\xb1\x6d\x73\xc9\x07\x5f\xf1\xaa\xad\x3b\xcc\xc7\x98\x33\xb1\x29\x6b\xfe\x6d\x0e\xfe\xe6\xa4\x68\x66\x41\x5a\x77\x59\xb1\x44\xae\x3d\x70\xac\x7a\xf6\xf3\x09\x08\x1b\x50\xd1\xda\x1a\xb1\x3a\x16\xe0\x12\xa3\x20\xbc\xe2\xb1\x90\x87\x7e\x48\xe8\x48\x2b\x76\x9b\x33\x96\x8e\x2a\xfb\x5c\xca\x0a\xca\xa9\x65\x48\x01\x01\xa3\x92\xc2\xce\xf0\x02\x18\x87\x19\x13\x52\xf5\xd8\x1c\x7b\xe1\xf8\x85\xe3\x90\x49\xac\xe6\x56\xa8\x45\xe2\x0a\x47\x9b\x91\xa3\x63\x26\xcc\x17\xfa\xe7\x46
\x98\x7a\xa5\x09\xa3\x62\x46\x36\x67\xe5\x16\x96\xef\xad\xef\x7d\xb1\x65\x7b\xeb\xd7\x1f\xc2\xd5\x2e\xd7\xdb\xe3\xa3\x85\x55\xaa\x4d\x99\x4a\xd0\x5f\x6e\x95\xbe\xca\x2a\xe8\x4f\x5f\x6c\x1d\xf4\xa7\xd5\x57\xc2\x4e\xa6\x9d\xb3\x8c\x07\xbc\x71\x29\x18\x75\xd6\x88\x5e\x4e\x7d\x84\xaf\x27\x7d\x1f\xc4\x8c\xc5\x81\x39\x2f\x8d\x05\xe6\x80\x69\x8b\x5b\x6a\x61\x71\xf9\xbe\xd4\x8a\x8d\x66\x88\x63\xff\x02\x09\xf1\xc0\xb8\xff\xc5\x96\x2e\xdf\xed\xea\x6b\x58\x7a\x3b\x30\x82\xb7\xef\x46\x8d\x53\x8e\xa9\xc7\xe7\x91\x62\xa3\x09\x09\xb0\x3b\xfe\x73\xa7\x21\x42\x23\x08\x91\xc5\x10\xc6\x58\x3e\x60\xdc\x74\xab\xad\xe1\xd2\x2c\x79\xa4\xbb\x9f\x5a\x5a\x77\x94\x9e\xe7\xe3\x16\x02\x15\x05\xe1\xfe\x57\x65\x74\x43\xc7\x6e\x38\xed\xe8\xa4\xf9\xb0\x18\x16\x09\xe5\x24\xd7\xd1\x32\x94\x91\x31\xd7\x5e\x07\x68\xaa\xaf\x34\xa3\x3e\xf1\x90\xd4\xf7\xad\xc9\x99\xbe\xab\xaf\x71\xb2\x4d\xd7\xe4\xaf\xfc\x25\x7f\x3b\x89\xb3\x67\x82\x02\x61\xc2\x69\xed\x19\xb3\x3e\x6a\x6f\x26\x0c\xa6\x27\x59\xe7\x2c\xe9\x21\xa6\xb4\xc1\xb2\x89\xaf\x1b\x0b\xae\xbd\xf3\xbd\xe3\xf1\x5b\x5c\x6b\xdb\xc3\xa3\x78\xfc\xad\xed\xe1\x09\xb2\xb3\xb5\xac\xef\xf0\xdc\x79\x8b\xdf\x1e\x1f\x35\xc2\xf4\x91\x44\x63\x24\x1a\xf3\xfa\x56\x5f\x13\x3c\xdf\xbc\xe8\x7d\x9b\x76\xf2\x04\x57\x26\x91\xb0\x3a\x85\xcf\xec\x9a\x8d\x20\x23\xce\xee\x89\x8f\x7d\x35\x81\xc6\x01\xa7\x65\x36\x0e\x26\x3d\x41\xa6\x54\x59\x19\xca\x04\x98\x68\xe6\xdf\x6f\x97\x1a\x98\x20\x42\x04\x4c\x31\xc5\x5c\x5b\x2b\xe3\x79\xa2\x16\x6c\x92\x0a\x74\xf0\xf5\x46\x49\x40\xf7\xb0\xe6\xf5\x6f\x13\x6b\xee\x06\x67\x16\x3f\x39\x71\x19\x8d\x4e\x1d\x61\x28\x33\x10\x86\x13\x1d\xc7\xee\x16\xb6\xde\xdd\xa3\x9e\x7c\x78\xaa\x6d\x07\xa8\x8a\x08\x36\xbe\x78\x5f\x84\x8f\x73\x3d\x7d\x8f\xcc\xac\x07\xd8\xcc\xd1\x2d\xd4\x30\x22\xbe\x26\x47\x87\x62\xb3\x3b\xad\x86\xbf\x3a\x01\xc4\x9c\x24\xfb\xe0\xbb\x91\xde\x16\x6d\x1a\x48\x5b\x02\x80\x7b\xc4\x09\x8b\x05\x08\x4c\x05\x91\xe4\xde\x30\xf8\xe6\x53\x1e\xda\xc6\x4e\x16\xa2\x27\x15\xc5\xdf\x85\x62\x70\xa4\x90\x67\xcd\x93\xd7\x3e\x82\x12\x9c\x82\xbe\xb9\xe5\x7e
\xa4\xa1\x9b\x3d\x24\x2f\xb1\x4d\xda\x99\x1f\x89\x88\xce\x84\xaf\x6b\x16\x8f\x22\xcc\x3d\xa4\xa8\xc7\x67\x21\x22\xb4\x4d\x31\x05\xe7\xf9\x10\x49\xf8\xbb\xb1\x8a\x54\x3f\x9b\x22\x20\xce\x98\xbc\xe0\x84\x7a\x24\x42\xc1\xe6\xa5\xf5\x65\x59\x77\x4f\x50\x64\xab\x69\x81\xc8\x21\x9a\xc8\xcc\x8d\x2d\x83\x9c\xca\x53\x32\xc1\x92\x84\xf8\x0d\x8b\xeb\xab\xe7\xc0\x97\xcb\xf3\xbb\x2a\xa0\xb5\xfa\x4a\x05\x16\x8c\xe3\x38\x49\xbc\x3b\x2c\x61\xca\x11\x95\x6d\x2a\x71\x98\xf7\xb5\xb5\x3f\x53\x98\x94\x31\x1e\xe3\xc6\xbb\xe4\xbe\x6a\xe6\xe1\xe7\x9a\x5b\x69\xe1\x5c\xef\xc5\xf3\x84\x01\x95\xfc\xdf\xdc\xa2\xf3\x58\xc8\x0d\xeb\xb8\x57\x49\x1f\x5f\x41\xcb\x4d\x07\xf8\xbd\xea\xb9\xe9\x08\x37\x2f\x3b\xaf\x16\xfa\x7a\x82\x82\xf3\x71\xba\x6e\x3a\x9d\xdf\xae\xb6\x2b\x72\x39\x6a\x6b\x3c\x72\xce\xc1\xdd\xc4\x91\x73\x9b\x0e\x6a\x68\xaa\xa4\xe2\x5d\x32\x19\x40\xa8\x20\x7e\x35\x2f\x7d\xbd\xc3\x69\x9d\x43\xf8\xbb\x49\x21\xdc\x50\x1a\xb0\x85\xbe\xfc\x29\xb1\x9a\x55\x9b\xdd\xa8\xf8\xb3\x30\x99\x8d\x22\xd8\x4d\x28\x0c\x25\xb8\x60\x29\x9d\x4c\x6e\xb6\x60\x11\x47\x11\xe3\x8a\x1d\xd2\xf0\x43\x8b\x6a\x5d\x19\x84\x8a\xa0\x34\x8b\x27\xa1\xd3\x34\xe4\xd0\x7e\x27\x9e\x25\x5d\xdd\xb8\xdf\x5c\xd6\xa6\xd8\xad\x67\x64\x11\x7b\x33\x40\x02\xb6\x5e\xf4\x5f\x6e\xc1\x0e\xa1\x5e\x10\xfb\x4a\xd8\x20\x10\xf1\x38\x24\x94\x71\x37\x45\xfb\xb9\x97\xfb\x2f\xff\x6f\xab\x01\xb6\x15\x30\xd7\x5b\x11\xc7\xf7\x04\x3f\x6c\x39\x48\x6b\x1e\x3d\x93\x33\xcc\xd3\x01\xd7\x4b\x9c\xe1\x64\x21\x19\xd6\x6a\x27\xc2\xc9\x54\xc5\xc9\x42\xc2\x31\x1e\x13\x44\x1d\xce\x6b\x8f\x19\x60\x96\x0c\x8f\x58\x18\x31\x8a\xa9\x5c\x7f\x0e\xe9\xf9\x42\x17\xab\xf1\x88\xc0\x3a\x37\xd4\x4b\xc0\xa8\x99\x42\x9e\x24\xf7\x4d\xf1\xce\x8c\xb6\x11\x3d\xf0\xf8\xd0\xd8\xaf\x5f\x93\x71\xf9\xcc\xcd\x47\xec\x1e\x8b\x2b\xdb\x10\x62\xd8\x1c\x4f\x06\xd0\x83\xa3\xf3\x77\x17\xe7\x67\x27\x67\x57\x2d\x83\xc9\x54\x9b\xc3\xb3\xc3\xa3\xf3\xb3\xe3\xc3\x86\xd7\x8e\xcf\x8f\xde\x9e\x5c\x36\xbd\x74\xf9\x7e\xd8\xd4\xdf\xeb\xd3\xe1\xd9\xdb\x86\x77\xde\xbc\x3a\x1c\xd5\x05\xb6\xe9\x77\x86\xbf\x9f\xdc
\xfc\x71\xf2\xea\xcd\xd1\xe1\x55\xc3\xab\xbf\xbd\xbf\xf8\xf7\x55\x23\xf2\x6f\x4f\x2e\x5f\x9d\x5c\x9e\xd7\x6b\x78\x3a\xa4\x6e\x74\x75\xde\xf0\xd2\xe5\xe1\xd9\xaf\x8d\x1d\x8e\xce\x4f\x9b\x5e\xf9\xcf\xc9\xc5\xc5\xc9\xe9\xf0\xac\xe9\xb5\xf3\xf3\xb7\x27\x27\x17\x35\x3d\xb6\x8b\x6f\x6a\x19\x5f\x96\xd6\xe3\xbd\x68\x19\x36\xb5\x6a\xf8\xaf\x83\xde\x56\xee\x15\xf2\x51\xd3\x51\xeb\x5a\x35\x4a\x0c\x52\xf0\x11\x0e\x1b\x8a\xd8\xba\x6c\x30\xed\x9b\x74\x68\xcc\xb5\xab\x59\x27\x93\x26\xfb\x8f\xb2\xb7\x6f\x23\x8e\x27\xe4\xe3\x81\xed\x6c\x5e\x9f\x8c\x69\xbc\x0d\xb9\xa4\x41\xb8\xf5\x94\x8d\x6b\x8b\xe4\xc8\x30\xea\xfb\x84\xdf\x9a\x11\x4c\x58\x10\xb0\x87\x26\xc1\xa4\x71\x4a\xd4\x15\x83\x0f\x16\x2e\x70\x80\x70\x08\x51\x14\x29\xa5\xf3\x00\xf6\xc0\x43\x11\xf2\x88\x9c\xf7\x84\x37\xc3\x7e\x1c\x34\x85\xae\xdd\x2e\x36\xe8\x7f\x0c\x83\x5b\x05\x4a\xe1\x6d\xd0\xef\x09\x22\xb1\xfb\xde\x27\x42\x7a\x51\x03\x58\xf3\x92\xcb\x87\x70\x4d\x67\xfe\x44\x1c\xc0\xad\xfa\x27\x07\x72\x46\xee\x55\x57\xea\x9f\xf4\xfb\xda\x0e\xf6\xd4\xa8\x39\xf6\x0f\xe0\xd6\x7c\xc8\xc1\x8b\xc8\xf4\x00\x6e\x23\x32\xed\xa7\x44\x72\x0b\x7b\xf5\xca\x56\x84\xf8\xdd\x01\xdc\xea\x7f\x1d\xe2\xa2\xaf\xa8\x45\x81\x9c\x23\x4e\x0f\xe0\x56\xfd\x93\xe9\xea\x75\x43\x94\x96\xae\x5b\x44\xa8\xa1\x0b\xa3\xa6\xe9\x12\xa3\xd6\xdf\x9a\x62\xb7\x42\x8e\x8b\x81\xd1\x4b\x61\xd4\x97\xe1\x5a\xde\x96\x93\x68\x4a\xe8\xf4\x55\xec\xdd\xe1\xca\xea\xbf\x2d\x78\xbf\x68\xca\x65\xc1\x2e\x67\x68\x1d\x16\x4c\xf7\xb1\x86\x91\x29\x57\x55\x57\x3d\xe4\x4f\x36\x06\x1f\x47\x98\xfa\x98\x7a\x04\x8b\xfd\x9c\x20\x30\xc5\x9f\xf4\x3b\x9c\xdc\x63\x5d\x14\x5b\xb0\x00\x03\xd3\xf9\x65\xd5\xca\x91\x4d\xaa\xf6\x59\xc6\x21\x37\x57\x0a\xba\x19\xa6\xc5\x71\xdf\x62\x9e\x73\xdd\x54\xc2\xf4\xb1\xc4\x3c\xd4\x65\xba\x0b\x23\x0e\x98\xa7\xe9\x08\x76\xde\x8f\xf6\xe1\x70\x34\x3c\xd4\x9e\xbf\x93\xf7\x3a\x60\x50\x61\x52\x4d\x8f\x96\x60\xb6\x45\x01\x37\x40\x9e\xc7\xb8\xb6\x2b\xac\x8e\x5d\xc8\xf4\xff\x8b\xd5\x64\xca\x3e\xcc\x30\xc7\xba\xe3\x34\xbf\x5f\xa8\x89\x0e\xd8\x5c\xa9\xf0\x56\x4c\x51\xf0\x38\x46
\x12\xeb\xbf\x4d\xf6\x6c\x8d\x3d\x4c\x04\xd8\x64\xf9\x5e\x80\xef\x71\xb0\x0f\x11\xe6\xbd\x64\xf0\x16\xef\x1d\x91\xcb\x1e\xb3\xa3\xaa\x04\x6b\x5a\xad\x29\x6d\xdd\x76\xd6\x33\x40\x77\x6b\xec\x9a\xbd\xbd\xab\x24\x24\x08\x38\xfe\x6f\x4c\xb8\x92\xde\xe5\xa4\xac\x58\x64\xdf\x94\x6f\x83\xf7\x97\xc3\xea\x29\x62\x15\x10\xfa\x7b\x55\x52\x6e\xe5\x3a\xf9\xcd\xc7\x6b\xd9\x63\x35\x8b\xce\xa0\x96\xbf\x93\xa3\xb4\xca\xb2\xcf\x12\x87\xd1\xba\x25\xcf\x55\x02\x73\xcd\x62\xa7\xce\xc1\x8c\xa3\x19\x0e\x31\x47\x41\xc2\x1e\x56\xd0\x08\x1d\x4b\x94\xda\xed\x23\xb5\xef\xe8\x1f\xdf\xa1\xe8\x12\xfb\x71\x4d\x41\x97\x19\x51\x9d\xce\x9d\x0e\x53\x25\x83\xd4\x24\x26\x02\xa8\x31\x94\x55\x7b\x93\x57\x11\x3d\xd5\x8b\x6c\x45\x52\x46\xf4\x64\x50\x6a\x92\x3b\x95\x60\x95\x3c\xfa\x8a\x72\x27\x9f\xaa\x35\xae\xa7\xf3\x19\x52\xbc\x7e\x75\x75\xaa\x6c\xf4\xff\x7b\x0e\x3e\x9a\x8b\x7d\x18\xc7\x52\xaf\x98\x87\xa8\xae\x56\x81\xe8\x5c\xbf\xb3\xc3\x38\x50\x46\xf1\x2e\x90\xea\x13\x53\xd5\x30\x5d\x63\x87\x53\x2b\x31\xd3\x20\x1b\x8b\xe2\xe7\xfb\x17\x33\x0f\x1b\xb9\xbc\x65\xc3\x77\xb6\x7c\xb5\xab\x5a\xba\xfb\x59\x0a\x28\x76\xf7\xb3\x74\xf7\xb3\xd4\x71\x67\x77\x2d\x4b\xc3\xd3\x5d\xcb\x02\xdd\xb5\x2c\x15\x4f\x77\x2d\x4b\x77\x2d\xcb\xb7\x56\x8c\xce\x3c\xdd\xb5\x2c\xf0\x74\xaf\x65\xe9\x6e\x63\x59\x78\xba\xdb\x58\xea\x9e\xee\x36\x96\xee\x36\x96\xee\x36\x16\xfd\x74\xb7\xb1\x2c\x3c\xdd\x6d\x2c\xdd\x6d\x2c\xe5\x4d\xbb\xdb\x58\xba\xdb\x58\xba\xdb\x58\xba\xdb\x58\x8a\x4f\x77\x1b\x4b\x35\xc0\xee\x36\x96\xee\x36\x96\xee\x36\x96\xec\xd3\xdd\xc6\xe2\x9e\xee\x36\x96\xee\x36\x96\xee\x36\x96\x27\xee\x00\xef\x6e\x63\x79\xaa\xb7\xb1\x74\x97\xb0\xb8\xa7\xbb\x84\xa5\xf0\x74\x97\xb0\x74\x97\xb0\x74\x97\xb0\x74\x97\xb0\x74\x97\xb0\x94\x3c\xdd\x25\x2c\xdd\x25\x2c\xb9\xa7\xbb\x84\xe5\x31\xaa\x67\x77\xf7\x4a\x77\xf7\x8a\x7d\xbe\xd7\xbb\x57\x02\x34\xc6\x41\xe9\x9e\xd2\x60\x22\xb6\x4f\xbd\x6e\x60\xf6\x02\x93\x9f\x6a\x84\x4a\xde\xab\x73\x30\x99\x51\xe8\x14\x10\x21\x98\x47\x74\xcd\x6c\x22\x67\x46\x55\xae\x0d\xfa\xd7\xfd\x99\x84\x6a\x27\x22\x6d\xb1\x74
\x78\xa1\xd9\xe9\xe7\x1f\x15\xe9\x73\xe4\xa9\x8d\x03\x02\x46\xa7\xe6\xb8\xb7\x52\xc4\x2b\xb9\xcc\x78\xe8\x12\x85\x92\xc4\x69\xb8\x38\xba\x3c\x01\x8e\xa7\x71\x80\x38\xe0\x8f\x11\xc7\x42\x17\xb3\x81\xe8\xd3\x69\xf0\x39\xfa\x74\xca\x3e\x7f\x7a\xbe\xff\xf3\xcb\xcf\x35\x88\x1a\xfa\xff\x7a\xa8\x5e\xa7\xb8\x46\x9f\xce\x3e\xdf\xf4\x3e\x28\x94\x7f\x2c\x47\xf9\x8c\x99\xe3\x77\x39\x43\x14\x7e\x7c\xe9\x96\xc9\x43\x54\xe1\x9e\x2c\x95\x6f\xd6\x0a\xc1\x94\xdc\x63\x5a\x5b\x4a\xbc\x15\x1b\x5a\x6f\x59\x96\x1a\x2b\x68\x70\x31\xcd\xbf\x40\xed\x0b\x31\xdf\x16\x76\xe6\xe2\x91\x85\xf3\xe5\x96\xa2\xa2\xce\x7b\xbf\x50\xfb\xd3\xfd\x69\xdc\x08\x7c\xb0\x88\x69\x75\xfe\x50\x84\x38\xa6\x45\x0d\x33\x8e\x7c\x24\xf1\x15\xc9\x9b\x75\x15\xd3\xe4\xd4\x45\xd5\xa4\x57\x08\x98\x28\x4c\xe1\xfb\x04\x6e\xe6\x9d\x4a\x63\xad\xde\x30\xd3\xb1\x23\xd2\x6e\xda\xf0\x80\x04\x04\x48\x48\x8b\x7a\x6e\x23\x6a\x35\xe5\xf7\x8b\xb5\xa3\xaa\xf4\xe5\x2a\xfd\xb8\x30\xd8\xc5\x7a\x51\x2b\x8e\xd4\x8b\xb9\x5a\xa3\x6c\x0d\x29\x2d\xb9\x1e\xac\xe6\x92\xcc\x42\xeb\x41\xff\xff\x00\x00\x00\xff\xff\x17\xc9\x40\x7e\x87\xee\x01\x00"), }, "/dlp": &vfsgen۰DirInfo{ name: "dlp", @@ -434,9 +434,9 @@ var Assets = func() http.FileSystem { "/gkehub/beta/membership.yaml": &vfsgen۰CompressedFileInfo{ name: "membership.yaml", modTime: time.Time{}, - uncompressedSize: 18494, + uncompressedSize: 18491, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7c\x5b\x6f\xdb\xc6\xb6\xf0\xbb\x7f\xc5\x42\xf3\xd0\xa6\xd0\x25\x49\x3f\xf4\x41\x7b\x03\x1f\x54\x49\x71\xd5\xf8\x06\x59\x6a\x90\xf3\x62\x8d\xc8\x25\x69\x8e\x87\x33\xec\xcc\xd0\x8a\x76\x91\xff\x7e\x30\x37\xde\x69\xcb\x89\x7b\xd0\x03\x94\x2f\xb1\xc8\xe1\x9a\x75\xbf\xcd\x62\x5e\xc1\x44\xa4\x47\x49\x77\x7b\x0d\xef\xde\xbc\x7b\x07\xe7\x42\xec\x18\xc2\xc5\xc5\x64\x00\x63\xc6\x60\x61\x1e\x29\x58\xa0\x42\xf9\x80\xf1\xe0\xec\x15\x9c\xbd\x82\x0b\x1a\x21\x57\x18\x43\xc6\x63\x94\xa0\xf7\x08\xe3\x94\x44\x7b\x0c\x4f\x7a\xf0\x3b\x4a\x45\x05\x87\x77\x83\x37\xf0\x83\x59\xf0\x9d\x7f\xf4\xdd\xeb\x7f\x9d\xbd\x82\xa3\xc8\x20\x21\x47\xe0\x42\x43\xa6\x10\xf4\x9e\x2a\xd8\x52\x86\x80\x9f\x23\x4c\x35\x50\x0e\x91\x48\x52\x46\x09\x8f\x10\x0e\x54\xef\xed\x36\x1e\x88\xc1\xe3\x93\x07\x21\x36\x9a\x50\x0e\x04\x22\x91\x1e\x41\x6c\xcb\xeb\x80\x68\x87\xb1\xb9\xf6\x5a\xa7\xa3\xe1\xf0\x70\x38\x0c\x88\xc5\x76\x20\xe4\x6e\xc8\xdc\x4a\x35\xbc\x98\x4f\x66\x57\xb7\xb3\xfe\xbb\xc1\x1b\xf7\xce\x8a\x33\x54\x0a\x24\xfe\x91\x51\x89\x31\x6c\x8e\x40\xd2\x94\xd1\x88\x6c\x18\x02\x23\x07\x10\x12\xc8\x4e\x22\xc6\xa0\x85\xc1\xf8\x20\xa9\xa6\x7c\xd7\x03\x25\xb6\xfa\x40\x24\x9e\xbd\x82\x98\x2a\x2d\xe9\x26\xd3\x15\x76\x05\xfc\xa8\xaa\x2c\x10\x1c\x08\x87\xef\xc6\xb7\x30\xbf\xfd\x0e\x7e\x19\xdf\xce\x6f\x7b\x67\xaf\xe0\xe3\x7c\xf9\xeb\xf5\x6a\x09\x1f\xc7\x8b\xc5\xf8\x6a\x39\x9f\xdd\xc2\xf5\x02\x26\xd7\x57\xd3\xf9\x72\x7e\x7d\x75\x0b\xd7\xef\x61\x7c\xf5\x09\x3e\xcc\xaf\xa6\x3d\x40\xaa\xf7\x28\x01\x3f\xa7\xd2\xe0\x2f\x24\x50\xc3\x48\x27\xbd\x5b\xc4\x0a\x02\x5b\xe1\x10\x52\x29\x46\x74\x4b\x23\x60\x84\xef\x32\xb2\x43\xd8\x89\x07\x94\x9c\xf2\x1d\xa4\x28\x13\xaa\x8c\x38\x15\x10\x1e\x9f\xbd\x02\x46\x13\xaa\x89\xb6\x77\x1a\x44\x0d\xce\x28\xdf\x8a\xd1\x19\x80\xa6\x9a\xe1\x08\xce\xef\xf1\xd7\x6c\x33\xbc\xc4\x64\x83\x52\xed\x69\x7a\x06\x10\xa3\x8a\x24\x4d\x0d\x88\x11\x2c\xf7\xe8\x17\x41\xb1\x08\x24\x2a\x91\xc9\x08\xcf\x00\x3e\xf7\xe3\x88\xf5\x95\x96\x59\xa4\xfb\x9c\x24\x38\x82\x0a\x34
\xf7\x7c\x4f\x54\x9f\x92\x64\x04\x5b\xc2\x14\x9e\xa5\x44\xef\x95\x41\x63\x87\xda\xfc\xd3\xb2\xe9\x36\xe3\x91\xf9\x65\x74\xd0\x0a\x71\x87\x46\xf5\xb6\x42\x26\x96\x3a\x20\x1b\x91\x69\x20\xd5\xdd\x00\x52\x22\x49\x82\x1a\xa5\x72\x80\xfb\xd0\x82\x94\xb9\x82\xf2\x8c\x40\xcb\x0c\xfd\xcd\x0a\x1a\x63\xd8\x66\x8c\x01\xe5\x4a\x5b\x55\x17\xdb\xfa\x76\x46\xe9\x8e\xa7\x12\x60\x17\xff\xed\x48\x88\x91\xa1\xc6\x53\x69\x70\xab\xff\x16\x28\x8f\x19\x7b\x2e\xd6\x8c\x9d\x8a\x77\x2a\xc5\x7f\x63\xa4\x1f\x43\x5a\x45\x7b\x4c\xc8\xc8\xff\x02\xd0\xc7\x14\x47\x60\x1c\x06\xdf\x55\x60\x31\x11\x59\x71\x7f\x03\x30\x46\xd5\xc9\x76\x62\xd6\xb6\x68\x59\x42\xf8\xf1\xff\x24\xf5\x26\xd6\x08\x8e\x5c\x5b\x24\xdd\x52\x8f\x6f\x41\x4e\x78\xd7\xbb\xb5\x86\xda\x39\x2f\x44\xe3\x9c\x36\x35\xfc\xf3\x4f\xff\xe7\x97\x2f\xc3\x80\xa4\xb9\x1b\xfe\xfe\xf2\x65\x98\xe4\x60\xcc\x03\x43\xcf\x97\x2f\x15\x80\x99\x42\xd5\x57\x9a\x68\xec\xef\x29\xd7\x15\xa2\xdc\x8a\x94\x48\xe4\xba\x1f\x09\x6e\x42\x21\xca\x3a\x73\xdd\x2a\x46\x36\xc8\xd4\x08\xdc\xbf\x95\x47\xc6\x73\x46\x12\x89\xc6\x16\xe8\x35\xb7\x5a\x7e\x24\x91\xc4\x7d\x4d\x13\x14\x99\x1e\xc1\x9b\xca\x33\xeb\x89\xba\x1e\x3a\x63\x69\x3e\x75\x42\x11\x9b\x36\xcd\xf0\xbf\x9d\xcc\xf3\x1f\x55\x4a\xfb\x75\x55\x48\xa5\x48\x51\x6a\x8a\xaa\x90\x3c\xc9\xf4\x5e\x48\xaa\x8f\xc5\xad\xd6\x9d\x0b\x74\x77\xc2\x47\x9d\x71\x78\xb5\x6d\x89\x03\x51\xa8\x45\xdb\xe2\x8a\x69\x7d\x7f\x6d\xff\x20\x6c\x00\xbf\x8a\x83\x4d\x22\x62\xe4\x9a\x6e\x8f\x70\x10\xf2\x9e\x09\x12\x2b\xd8\x4a\x91\xb8\xfc\xa8\x80\x3c\x28\x41\x84\x3c\xa6\xc7\x22\xca\x12\xe4\x2e\x30\x9b\x5c\xe2\xa3\x07\x02\x73\x0b\x56\x1f\x6d\xb8\x4f\x84\x44\x88\x51\x13\x6a\xb4\xc1\x64\x45\x6a\x34\x1c\x46\x4c\x64\xf1\x60\x67\xf3\xbf\x41\x24\x92\xe1\x7d\xb6\x41\xc9\x51\xa3\xea\x23\xdf\x51\x8e\xc3\x58\x44\x6a\xb8\x17\x87\xbe\x16\xc3\x80\x5f\x9f\x7a\xd0\xdf\x97\x50\x6a\x63\xba\xb9\xc2\xda\x1b\x29\x1e\x68\x8c\xb2\xfa\xb4\xc5\xbe\x3b\xe5\x30\xaf\x41\xaa\xad\x36\x7a\x79\xcd\xd9\xb1\xa2\xcd\xad\x22\xb8\xce\x74\x9a\x69\x10\x9c\x1d\x07\x30\xe6\x39\x8a\x86\x06\x0b
\x19\xf4\x9e\x68\x90\xb8\x65\xc6\xa0\x2d\x9f\xd7\x54\xa9\x0c\xe5\xba\x06\x17\x4c\x0a\x68\x9e\x07\xde\x94\x80\x09\xc1\xaa\x32\x73\x20\xbe\x81\x01\xf6\xfd\x47\x29\xcb\x75\x6b\x0c\xbf\xdd\x5e\x5f\xc1\x47\xdc\xc0\x52\xdc\x23\x87\x1f\x7e\xfb\xb8\x7c\xed\x51\x80\xd5\x62\x3e\xc8\x49\x82\x24\x53\xba\x41\x97\xd2\x44\x6a\x97\x84\xaf\x83\xbe\xac\x4d\x2a\x08\x1b\x13\xa6\x1f\x08\xa3\x31\xac\x16\x17\x6e\x09\x43\xbe\xd3\x7b\xf8\xf7\xbb\x37\x6f\xde\x40\xb4\x27\x92\x44\xc6\xfb\x0f\x1a\x60\xe7\x5b\x50\xa8\x7b\x86\x67\x3c\x54\x1e\x07\xca\x98\x89\xa1\xe2\xe0\xc1\x5e\xcf\xa7\x13\xd0\x06\xed\xb2\x29\xd0\x36\xf2\xc1\x26\x40\x99\x01\xa7\x69\x44\xb4\xab\x1b\x6a\x42\xb9\x0b\x42\xb9\xb3\x42\x71\xf0\x63\xaa\x22\x93\xee\x1e\x1b\x10\x2d\x3e\x1b\x34\x39\xb0\x09\x76\x2e\x47\xb7\x28\xac\x16\x73\xb3\x9f\xc5\xd2\xec\x55\xc5\x11\x3d\x8a\x4d\xaa\x27\x0c\x89\x91\x6f\xc1\xf3\x98\x2a\x53\x52\xa8\xa6\xbd\x96\x04\x13\x11\x6e\x8a\xa5\x0d\x42\x4c\x25\x46\x9a\x35\x71\x4d\x44\x4c\xb7\x14\xe3\x7f\x01\xd5\x56\x90\x66\x75\x64\xb6\xc3\x18\x7e\x30\xe2\x6a\x3a\x04\xbf\x77\xfc\x1a\x36\xb8\x15\xb2\x6e\x2b\x00\x99\x32\xb8\x12\xe0\x78\x08\x1a\x63\x41\x49\xec\x23\x27\x1b\x66\x9e\x36\xc0\xbe\xae\x92\x1d\x78\x9f\x5b\xad\x10\xec\xeb\x55\xff\x63\x0b\xb4\xaf\xb6\xff\xef\x2b\x0e\xc0\xe4\x3a\x66\x93\x50\x46\xb6\x5b\x32\x50\xde\xd4\x93\x3d\x8d\xf6\x85\xb4\x82\xda\x48\x8c\xc4\x8e\xd3\xff\x60\x6c\x81\x4b\x5b\xf1\x11\x30\x3c\x65\xd8\xe4\x5b\x03\xac\xa1\xcd\xe8\x1e\x98\xda\xc8\x7a\x22\xaa\x40\xed\xad\x44\x37\xa8\x0f\x88\xbc\x96\x71\x2a\xb7\x6c\x83\x4c\xf0\x1d\x68\xd1\xb4\x11\xf3\xf8\xd7\x6c\x33\x80\xf7\xa6\x7e\xb5\x90\xf7\x42\x99\xf2\x93\xf2\x11\xfc\x79\xb3\xb8\xfe\x6d\x36\x59\xde\xcd\xa7\x5f\x7a\x55\x26\x58\xda\x5d\xca\xd7\x74\x7d\x0a\xd6\xe5\x57\x07\xfb\x6c\x33\xa0\x2e\xa0\xac\x7b\x40\x98\xde\x8b\x6c\xb7\x0f\xd6\x0b\x2a\xb3\x91\xd6\x58\x50\xb4\x27\x7c\xd7\x54\x3c\xca\x8d\xce\xa1\x84\x07\xd7\x4a\x50\x4e\x28\x54\xc1\xf8\x66\x3e\x28\xc2\x8d\xcb\x5b\x96\x34\xc1\x66\x38\x6f\xa8\x93\xc3\x7e\x04\xc6\x62\x6d\xf6\xd1\x1d\xea\x27\x39\xdc\xb3
\x13\x14\xab\x3b\xa8\x7c\x34\xde\xcd\xf0\xb1\x54\xda\x1e\x88\xf2\x78\xc7\x83\x0a\x06\xa5\xa0\x4b\x93\x24\xd3\xc6\x36\x6b\x7b\xb9\xd4\xe9\xe5\xe9\x9d\xe6\x70\xff\x1a\x7a\x1d\xde\x5f\x43\x6f\xb1\xd3\xd3\x04\x37\x88\xca\x5f\xee\x4c\xc2\x4a\x6b\x72\x15\x2b\x92\xf2\x9e\x6b\x76\xb8\xb2\xe7\xe7\x9f\x3a\x03\xda\xa5\xf1\xb7\x09\xd1\x91\x6b\x54\x49\xdc\xe1\xe7\x11\xac\x7f\x5c\xc3\xd2\xf5\xb6\x90\xc5\x46\xf1\x53\x89\x0a\xb9\xb6\x99\x18\xc3\x1d\x89\x8e\x90\x66\x32\x15\x0a\x55\x49\xa9\x91\xc7\xa9\x30\xc9\xfe\xb3\x33\xd4\x99\x7f\xf3\x94\x04\xb5\x65\x6d\x47\x0a\x11\x56\x56\x6a\x3e\x2d\x8c\x7e\x44\xfb\x32\xcb\x06\x27\xa4\x81\xbb\x7b\x9c\xb0\x4c\xe9\xae\xfc\xa7\x41\x62\x0b\x99\xe7\x39\x8c\xae\x75\x5d\xc4\x76\xbe\xd9\x41\xfa\xf9\x87\x59\x3f\xef\x8e\x95\xc8\x1f\x80\x31\x8c\x5c\x9e\xd4\x69\x4e\xc3\x8d\x95\xac\xc0\xba\xfe\xf3\x0f\x33\x88\xdc\xfe\xf5\xdc\xa0\x8b\x5f\x60\x0d\xd1\x35\xc3\x2e\x28\xbf\x6f\x3e\x7d\x34\x7c\xb6\x70\x6f\x51\x82\xd6\xb2\xba\x6a\x1f\xf3\x60\x98\x03\xb8\x45\xb6\xed\x33\xca\xef\x43\x80\x3c\x9f\xdc\xe4\xa8\xe5\xbd\xc4\xf3\x0f\xb3\x16\xa0\x90\x93\x6d\x03\x0f\x7e\x26\x49\x6a\x8c\x7d\x38\xcc\xeb\x56\x5f\x7e\x90\x94\x2a\x5b\x82\xe4\x65\x74\x72\xec\xfb\xbf\x4b\x55\x74\xa6\xfa\x07\x54\xfa\x6d\x9f\x0c\x3d\x64\xbb\x30\x6a\x15\xae\xbb\xfe\xcb\x88\x34\xe0\xa1\x80\x48\x04\xc2\x94\x00\x95\xa5\xa9\x90\xc6\x3b\x7d\xdf\xc9\x3c\x89\x5b\x94\xc8\xa3\x36\xe9\x98\xc2\x33\xb0\x61\x04\x93\x40\xcf\xb0\x5d\xd1\xdc\x65\x3d\xc2\x08\x14\xb2\x6d\x43\x0c\x85\x5b\xbc\x44\x4d\x62\xa2\xc9\xd7\x5b\xca\x87\x06\xac\xe7\x5a\xcc\x93\x10\xbe\xb2\xdc\x5a\x29\xdc\x66\xac\x84\x60\x61\x67\x89\xdf\xe8\x39\x26\x52\x30\x6d\x9c\xd2\x5b\x94\x0f\x28\xfd\x29\xc4\xb7\x1b\xcc\x87\x4e\xd8\x2d\xef\x3e\xca\x8e\xc7\x59\x52\xec\x63\xb2\x1c\xb0\xa7\x2e\x79\x06\xe4\xb1\x05\xd2\xf4\x32\x6e\x5b\xa7\xc3\xb0\x39\xc2\x7a\xe8\xdf\x59\x37\x0b\x90\x04\x13\x21\x8f\x97\x9b\x6e\xae\x50\xae\x71\xd7\xaa\xb6\x21\x9f\xa0\x5c\xff\xfc\xff\x9e\x66\xdb\xa5\xdf\xea\x65\x99\x64\xb2\x74\x2d\x34\x61\x9e\x14\x88\x48\x4a\x22\x93\x9d\x13\x95\x73\xa1
\x95\x45\x9b\xa3\x3b\xec\xc8\x12\xdb\xf1\x65\x65\xe5\x03\x2e\x62\x54\xb9\x19\xab\x1e\xc4\xb8\xa5\xdc\xa6\xc5\x70\xf9\x4b\x93\x8f\x66\xfd\x44\x64\xd5\x20\xfd\x17\x31\xf2\x2a\xec\xf5\xb2\x9c\x34\x60\x21\x32\x70\xcb\xac\x33\x6c\xaa\xf3\xa5\x43\xe3\x3c\xaf\xda\x99\x13\x9a\x33\xf3\xf8\xdb\x0d\xf0\xaa\x02\xef\x2f\xe0\x42\xe8\xf7\xcc\xa7\x75\x56\x18\x8d\xd9\x52\xd9\xd2\x17\x09\x84\x86\xc6\x8f\xed\x8a\x8b\xad\xd7\x24\xe1\x6e\x96\x38\x19\xb2\x3a\x93\x3a\x94\xee\xb7\xc2\x4d\x19\xd1\x46\x49\x7c\x3d\xe7\x23\x14\xfc\x07\xa5\xe8\xdb\x3d\xf3\x28\xf6\x03\xa3\xf7\x36\xe8\xf6\x05\xef\x9f\x4f\x6e\x5e\xdb\xb2\xad\x13\xd9\x3b\x27\xef\x50\xa1\x1a\x80\xb6\x95\x63\x70\xb5\xcf\x03\x27\xee\x68\x9c\xaf\xc2\x24\xd5\xc7\xa6\x94\xb3\x34\x6e\xa9\xbd\x4e\x94\xf0\x63\xb5\x49\xb8\x6a\x5a\xb0\xca\xf7\xfb\x0b\x3c\x0a\x4d\x10\x88\xf6\x95\xbc\xde\xa3\xca\xbb\xa4\x70\x30\x95\x3b\x23\x1d\x2a\xe0\xb8\x60\x2b\x7c\xaa\xfc\xaf\x3b\x0b\xce\x1e\xef\x6e\x6d\xde\xa0\x8b\xce\x50\x11\x61\xfb\x0c\x1f\xb0\xde\xbc\x28\x03\x75\x60\x14\xe5\x11\xe6\x39\xf8\xd4\x23\x65\xb2\x17\xbf\xb5\x75\x34\x92\x13\xc6\x5c\x8b\x77\x7c\x33\x6f\xcf\xc0\x04\x57\x59\xd2\xda\x8c\x7b\x88\xd2\xec\x7f\xcb\x9b\xfd\x1e\xf6\x7a\x59\x29\x3e\x4c\x6e\x56\x2f\xef\xcd\x8a\xb4\x22\x64\xcd\x2f\x91\x8b\x2d\x8a\xc3\xed\xd6\xf5\x4f\xe7\x62\x1d\x10\xba\x0e\x18\x8c\x8a\x53\x1e\xb2\xe3\x32\x47\x02\xa0\xe0\x69\xf6\x22\x63\x4d\x0f\xbb\x41\x37\xfd\x80\xb1\xd5\x30\x02\x91\x90\xae\x0f\x69\x6a\x5d\x6a\x80\x62\x1c\x9c\x52\x2f\x78\x44\xa5\x91\xc4\x47\xb0\xa7\x57\x4d\x9d\x5b\x5a\x23\xcb\xf9\x3e\x82\x1f\x61\xc6\x55\x26\xd1\x61\x62\x00\x04\x7c\xa9\x02\xfc\x6c\x7e\xd0\x07\xac\x6c\xd9\xd6\x03\x16\x1c\xad\x4b\x33\x6a\x61\x7f\x54\x27\x0b\x06\xf0\x23\xdc\x48\x91\x92\x1d\xd1\xa5\x26\x9c\xed\xb5\xcd\x8b\x02\xaf\x01\x99\x3c\x10\xca\xec\xf0\x07\x6d\x74\x38\xf2\xb3\x1e\x97\xd7\x0f\x0a\x52\x5c\xca\x0a\x94\x53\x4d\x49\xd3\xd6\x23\xc1\xb7\x74\x97\x49\x12\xfa\x0f\x31\x6e\x49\xc6\x6c\x8b\x0e\xde\x23\xd1\x99\xac\x74\x06\xdc\xf5\x58\x1e\x1c\x09\xce\x31\xd2\xb9\x58\xbb\x0d\x9a\x48\x49\x9a
\x3d\xc7\x66\x27\xac\x06\xf0\xe5\x7d\x6f\x49\x1b\x73\x6d\xb0\x6a\x66\x0f\xe8\x19\x6b\x0f\x20\x10\xaa\xcd\x80\x21\x90\x9d\x71\xb4\xd5\x86\x8b\x55\x82\x54\xa4\x19\xf3\x9e\xb2\x26\xba\x0e\x5f\xa0\x33\x69\x72\x3f\xeb\xb4\x09\xa8\x2c\x8a\x50\x29\x53\xae\x30\xc1\x77\x7d\x99\x71\x3b\x1a\x63\xa4\xe0\x24\x67\x17\xba\x9e\xe1\x13\xb0\x85\xf4\x71\xac\xac\x91\x73\xdb\xd9\xe5\x42\x97\x50\x8d\x33\x9b\xee\x73\xa3\x90\x0c\xce\x51\x3f\x0d\xf8\x82\x2a\x5d\xee\x05\x4b\xfc\x23\x43\xa5\xd5\x00\x96\x6e\xb8\xc5\xf5\xa6\x7c\xc1\x9e\x10\x4e\xb7\xa8\x34\x90\x6d\x57\x95\x6a\x4f\x32\x9c\xea\x7a\xa3\x73\xf4\xba\xd6\x70\x44\x18\x43\xe9\x5d\x06\x24\xe4\x1e\x81\x34\x88\x6b\x8f\x44\x26\xf9\xb6\xc7\x45\x84\xbb\xec\xc2\x4b\x2c\x21\xea\xbe\xe9\x29\xf2\x33\x6c\xaa\xb4\xf7\x8d\xe6\xcf\x96\x75\x54\x63\xd2\xaa\xf2\x8f\xfa\xe9\xf2\x1e\xcf\xf2\xbe\x27\xd8\xc6\xe3\xd6\xea\xd9\xe1\xdc\xdc\x6d\x24\x52\x6c\xcd\x96\xcb\x14\x6c\x84\x60\x48\xda\x2a\xcf\x1a\x19\xde\x7c\xcb\xb0\x3b\x5f\xaa\x18\xe8\xc7\x3d\xda\x19\xb2\x8a\xb6\xf8\xbc\x30\x37\xa1\xa0\x3d\x9d\x20\xdd\xa9\x80\x27\xed\x4e\xd9\xfd\xd7\x03\x98\x6f\x21\xe3\xe1\xe0\xaf\xd0\x41\xaa\x80\x28\x93\x9d\xc4\x6d\xa7\x16\xc5\xb5\x71\x47\x34\x2a\x25\x11\x82\x03\x3a\xa8\x99\xbc\x9d\x52\x31\xee\x63\x31\xbb\x5d\x42\x42\xd2\xb4\xcb\x81\xb8\xeb\xb0\x47\xee\x26\xa7\x8c\xbd\x55\x88\x76\x93\x85\xed\xfd\xba\x70\x05\x12\x9e\x92\x5b\x67\x26\xec\xae\x7a\xed\xfc\x14\x7b\x2b\x12\xfb\x34\xbe\xbc\x28\x78\xe9\xbb\x73\x81\x8a\xd6\x1e\x80\x57\xed\x89\xbc\x7c\x04\xfd\xe7\x15\x69\x97\x2d\x50\x9f\x8a\x05\x73\x5e\x0d\x05\x96\x10\x89\xbe\xad\x4a\x8a\xae\xfc\x93\x3e\x7b\xb2\xa8\xeb\x01\xdd\x71\x21\xbd\x2a\x94\xfa\xae\xca\x08\x5c\xba\xac\x20\x22\xdc\x06\xaf\x4e\xe7\x37\x59\xe4\x07\xae\x03\x98\x58\x7f\xa7\x82\xc3\xf3\x16\x11\xd6\x85\xf3\xb9\x28\x93\x26\xe1\x6f\x39\xa1\x05\xeb\x0c\x7c\xc3\x98\x57\x12\x1c\xef\xec\xeb\x11\xa4\x2d\x5a\xf4\xba\xbc\x3f\x43\xf2\x90\xcf\xd2\x1a\x1e\x38\xc7\x4a\x4d\x45\xca\x11\xf0\x33\x75\xa1\xc0\xa1\x5b\xb6\x3c\x63\x31\xed\x2c\x28\x9f\x70\x97\xd0\xdd\x13\x17\xad\x36\x88
\xbc\x9c\x00\x7a\x97\x2e\xac\xfb\xe8\x1a\x58\x29\x2b\x8f\xef\x2d\xf7\x33\x6e\xc4\xd0\x72\xfe\x13\xae\x42\x65\x5f\x2e\xb3\xb9\x6c\xc2\x7c\xd9\xe4\x66\x1c\xc7\xd4\x65\xe0\xed\x39\x8e\x55\x19\x8e\xdd\xcc\x2f\xa5\xdd\x5a\x54\x24\x60\x63\x76\x39\x05\xb5\x07\x8a\x36\x34\x9b\xdc\xb7\x3b\xa4\xe3\x03\xca\xa3\xaf\x1c\xeb\x16\xf3\x54\xb2\x94\x27\x46\xed\x2d\xed\x7f\x92\xa5\x7f\x92\xa5\x47\x92\xa5\xd3\xcc\xed\x9f\x7c\xe9\x9f\x7c\xe9\xef\x96\x2f\x85\x27\xae\x9f\xf2\x48\xe0\xe9\x34\x98\x8e\x13\x58\x0f\xf0\xb1\x17\x4e\xb7\xaf\xa7\x81\x76\x9c\x71\xfb\x17\xac\x0e\xb4\xc4\x29\xd8\x21\xf7\xae\xbb\x4d\xa6\x4f\x99\xab\x6f\x48\x3c\x72\x18\x07\xa7\xc9\xbc\xbd\x2f\xd1\x7d\x10\xf7\x08\xc5\x36\x07\xaa\x74\x0d\xc2\x21\x9b\x16\xf6\x8b\xa4\xad\x68\xf7\xf1\x25\x82\xee\x8a\x8e\x21\x4c\x5d\xd7\x46\x85\x28\x6d\x82\x92\xd2\x95\xe6\x84\xdf\xa0\xcb\x2a\x6c\x3b\x2c\xc7\x22\x8c\xef\x91\x22\x9b\x2c\x4e\xa9\xc3\xaa\x1e\x88\x8d\x12\x0c\x75\x7b\xb7\xdf\x5c\xf9\xf0\x54\x31\x91\x66\x14\xb4\x3a\x87\x53\x5a\xfe\x76\x83\x9a\xbc\x9d\xc8\x4e\xa7\x7a\x8a\x4b\xad\xb7\x7b\xdf\xfe\xe2\x81\x3e\x4f\x46\x2b\x85\xb0\x26\x29\xc5\xcf\x1a\xb9\xa5\x62\xe8\xf1\x5b\xdb\xc6\x10\x92\x18\xc4\xb6\x93\xf4\xc6\xab\x6b\xab\xdf\x93\x4c\x69\x91\x04\x53\x99\xe2\xd6\x46\x6c\xc1\x1f\x3b\xcd\x0a\x97\x75\x9b\xc2\x8d\x0a\xf9\x38\xbe\x41\x50\xe8\xc6\x79\x8a\xf2\xc2\x84\xea\x92\x1d\x91\x94\xba\x93\xdc\xa7\xc5\xf4\xef\xb7\x83\xb7\x3f\x17\x08\x18\xfc\x25\x27\xac\x7a\x84\x76\xda\xec\xd3\x2c\x7f\xf7\xac\x83\xd5\xa5\xf6\xf0\x98\xe7\x7b\xb1\x63\xdf\xdb\x3b\xc6\x36\x95\x4c\x08\x27\x3b\x8c\x61\x3e\xf5\x23\x1e\xb5\x31\x97\x72\x6a\x66\x39\x34\x9f\xda\x2f\xf3\x36\x98\x8f\xa4\xfa\x6c\xb5\x48\x51\x37\x99\xce\x67\x01\x4d\x22\x27\x31\x12\x49\x82\x3c\xae\x6b\xa6\x31\x0a\x03\xaf\x73\xbc\x6a\xbe\xad\x4f\xc7\x17\xe5\xa3\x02\xd2\x75\xce\x96\x77\xaa\xed\xcb\x0f\x84\x65\x58\x13\xa9\xb7\xe4\xd5\x7c\x1a\xe2\xc3\xfa\x3e\xdb\x60\x5f\x1d\x95\xc6\x64\x5d\x44\xd5\x0a\x5c\x17\x00\x4a\x0d\x5b\xca\xb7\x92\xb8\x4f\xd7\x32\x89\x4b\x23\xba\x67\x8b\x72\xde\x80\xd1\xb6\xb6\x1e\x28
\x9a\x6f\xcd\x78\x96\x9c\xa0\x0c\xee\xac\xa0\xfc\xb2\xc5\xb3\xc1\x67\xaa\xc0\xe7\xf6\x55\x16\xf0\x01\xdc\x08\xa5\xe8\x86\xa1\xe3\xac\x1a\xc1\xfc\xea\xfd\x62\x7c\xbb\x5c\xac\x26\xcb\xd5\x62\x76\xb7\xfc\x74\x33\xbb\x5b\x5d\xdd\xde\xcc\x26\xf3\xf7\xf3\xd9\xb4\x07\xd7\x57\x77\x37\x8b\xd9\x65\x0f\x2e\x57\x17\xcb\xf9\xdd\xe4\xe2\x7a\x35\x2d\xb7\xbd\x91\x67\x49\x99\x73\xfd\xa7\x40\x56\xd6\x7a\xe8\x95\x7b\xa5\x8d\xf2\xfb\xfe\x23\x99\x86\x84\x1a\x71\x9d\xe4\x95\xdd\x4d\x47\xf8\x3b\x4d\xb4\x17\xe5\xaf\x71\x1a\x42\x29\x8d\xa1\x4d\x6e\x3c\x72\xb9\x11\x96\x4a\xe2\x41\x89\x00\xa5\x7d\xc4\xa1\x82\xbf\xfc\x98\xe8\x45\x03\x7e\x69\xed\x57\x8c\x8b\xbe\x2f\x3b\x4e\x37\x6c\xee\xc1\xbb\x7c\xd7\x60\xa3\x34\x49\x52\x6f\x84\x15\x16\x27\x42\x59\xd7\x61\xe2\x77\x94\x23\x05\xe6\x85\x0d\xa3\x6a\x1f\x1a\x12\xfe\xe3\x82\x89\xfd\xc6\xc5\xf9\xa8\x70\x3e\xeb\x4f\x50\x2b\x60\x5d\x85\xac\xcc\x3f\x84\x41\x42\x79\xa6\x51\xf5\xac\x9f\x8a\x04\xd7\x94\x67\x22\x53\x0d\xe4\x6d\x2d\x1f\x8b\xf0\x4d\x73\x05\x62\x29\x0f\xe8\x81\xf0\x1f\x95\xec\xc9\x03\x02\x37\xbb\x04\xe4\x31\x2e\x15\xcf\xec\xd8\x2b\xf7\x72\x4c\x00\xaf\xc0\xdc\xa0\xab\x0b\x9e\x3d\x51\x1b\x46\xe8\x9e\xef\x87\x2e\xaa\x1f\x57\x35\x24\x6b\x1c\x47\x80\x9e\x8f\x03\xca\xe6\x41\xe5\x49\x68\xda\x2d\x9f\x8d\xe2\x15\x49\x3a\x15\xaf\x39\xec\x6f\xd8\xdc\x83\x8c\xd3\x3f\xb2\xf2\xe4\x7f\x3d\x9c\xb4\x9c\xb4\xfa\xa2\xce\x59\x0e\xac\xf3\x51\xc5\x1f\x4b\x13\x8a\x3f\x56\x3f\xee\x2b\x7e\xdc\xd1\xf8\xcb\xba\x67\xa2\x4c\x05\xa6\xef\x61\x84\x28\x39\x80\x75\xe5\x95\x75\x29\x2b\x74\x1f\xc7\x2c\xde\x4f\xe0\xed\xdb\x77\x3f\xe5\xdf\xca\x6b\x98\x5e\xdd\x56\x60\x5a\x9f\x31\x82\xb7\x03\x18\x6b\x67\x2f\x95\x21\x66\x43\x87\xff\x60\xe7\x9d\x6d\xac\xd8\x2d\x22\xc1\x95\x9f\x65\x61\xe2\x50\xcb\x5b\x22\xa2\x10\x08\x4b\xf7\x84\x67\x09\x4a\x1a\x95\xc1\x09\x09\xeb\xfe\x1a\x7e\x2a\x60\xb9\x8f\x87\x4c\x1e\x81\x3c\xef\x0f\x56\xde\xaf\x42\x0f\xb0\xe0\xa3\x1d\xc4\x88\x08\xb7\x03\x28\xee\x6b\x76\x93\x49\xa8\x4a\x06\xf0\xfa\xff\xaf\x7b\x1e\x2a\x24\xe4\x33\x4d\xb2\xc4\x13\x54\x8d\x49\xdb\xda\xf0
\xf6\xf7\xcf\x55\x47\x2f\xe1\xe7\x6b\xe4\x4d\xe5\x0b\xc5\x56\x9b\xf1\xb0\xbf\xd9\x64\x1e\x9f\x55\xad\xcc\xa8\x1a\x57\x18\x7e\xba\x04\x4f\x0e\x9b\x98\xe6\x63\xaa\x9c\xd4\x66\x6e\xdc\x37\xa7\xb5\xfd\xed\x39\xff\xf3\x67\xd6\x6f\xcd\x6b\xa7\x64\x34\xf5\x85\x5f\x11\x6f\x2c\x88\xe6\x89\x42\x6b\xed\x7f\x22\xcf\xbb\xea\xdf\x48\xc4\x1d\xb3\x22\xa7\x7c\xe6\x34\x11\xf1\xc9\xd3\x21\x96\x28\xf3\x42\x2d\xc1\x7b\x94\x47\x0d\x3e\x35\xdd\xa3\xaf\x3d\x9d\x5c\x4f\x64\x9a\xbb\x1a\xf9\xdf\xe4\x7a\x5a\x4b\xf8\x26\x8b\xd9\x78\x39\xbf\x3a\xef\xc1\x62\x36\x9e\x7e\xea\xc1\x74\x76\x31\x73\x37\x56\x37\x53\xf7\xa8\x01\xf7\x76\xb6\xf8\x7d\x3e\x99\xdd\x85\x15\xf5\xc1\x88\x13\x45\x06\x2d\x09\x25\x58\x13\xa9\xe3\xd9\x5c\xe0\xd1\x6e\x3c\xb0\x54\x34\xee\x06\xa2\x1a\x0f\x02\x05\x8d\x07\x75\x12\xf3\x05\x2e\x4c\x7d\x4d\x29\xb8\xf2\x6f\x7e\x9b\xe9\xb8\x2c\xaa\x54\x1c\xae\x56\xa5\x92\xb0\x50\x06\x9b\x60\x55\x83\xa5\x0a\x31\x96\x44\x52\x28\x55\xfb\x66\xad\x54\x7a\x9b\x72\x8e\xb4\x3d\x82\x1a\x48\xff\x39\x91\x3b\xeb\xf0\xe7\x4d\xf9\xda\xfc\xff\x8b\x51\x26\xa4\xdb\xb8\x4e\xf3\x2f\xae\x7a\x40\x35\xec\xd0\x14\x87\x55\x88\xf9\xa4\x9e\xc3\xf5\x8e\x3e\xff\x63\xa5\xf6\x81\xc8\x6f\xcf\xba\x5b\x07\x1f\x5f\xf0\xe3\x2c\x53\x35\xe4\x73\x8c\xa7\x12\xfd\x3f\x01\x00\x00\xff\xff\x30\x9e\x1a\x9e\x3e\x48\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7c\x5b\x6f\xdb\xc6\xb6\xf0\xbb\x7f\xc5\x42\xf3\xd0\xa6\xd0\x25\x49\x3f\xf4\x41\x7b\x03\x1f\x54\x49\x71\xd5\xf8\x06\x59\x6a\x90\xf3\x62\x8d\xc8\x25\x69\x8e\x87\x33\xec\xcc\xd0\x8a\x76\x91\xff\x7e\x30\x37\xde\x69\xcb\x89\x7b\xd0\x03\x94\x2f\xb1\xc8\xe1\x9a\x75\xbf\xcd\x62\x5e\xc1\x44\xa4\x47\x49\x77\x7b\x0d\xef\xde\xbc\x7b\x07\xe7\x42\xec\x18\xc2\xc5\xc5\x64\x00\x63\xc6\x60\x61\x1e\x29\x58\xa0\x42\xf9\x80\xf1\xe0\xec\x15\x9c\xbd\x82\x0b\x1a\x21\x57\x18\x43\xc6\x63\x94\xa0\xf7\x08\xe3\x94\x44\x7b\x0c\x4f\x7a\xf0\x3b\x4a\x45\x05\x87\x77\x83\x37\xf0\x83\x59\xf0\x9d\x7f\xf4\xdd\xeb\x7f\x9d\xbd\x82\xa3\xc8\x20\x21\x47\xe0\x42\x43\xa6\x10\xf4\x9e\x2a\xd8\x52\x86\x80\x9f\x23\x4c\x35\x50\x0e\x91\x48\x52\x46\x09\x8f\x10\x0e\x54\xef\xed\x36\x1e\x88\xc1\xe3\x93\x07\x21\x36\x9a\x50\x0e\x04\x22\x91\x1e\x41\x6c\xcb\xeb\x80\x68\x87\xb1\xb9\xf6\x5a\xa7\xa3\xe1\xf0\x70\x38\x0c\x88\xc5\x76\x20\xe4\x6e\xc8\xdc\x4a\x35\xbc\x98\x4f\x66\x57\xb7\xb3\xfe\xbb\xc1\x1b\xf7\xce\x8a\x33\x54\x0a\x24\xfe\x91\x51\x89\x31\x6c\x8e\x40\xd2\x94\xd1\x88\x6c\x18\x02\x23\x07\x10\x12\xc8\x4e\x22\xc6\xa0\x85\xc1\xf8\x20\xa9\xa6\x7c\xd7\x03\x25\xb6\xfa\x40\x24\x9e\xbd\x82\x98\x2a\x2d\xe9\x26\xd3\x15\x76\x05\xfc\xa8\xaa\x2c\x10\x1c\x08\x87\xef\xc6\xb7\x30\xbf\xfd\x0e\x7e\x19\xdf\xce\x6f\x7b\x67\xaf\xe0\xe3\x7c\xf9\xeb\xf5\x6a\x09\x1f\xc7\x8b\xc5\xf8\x6a\x39\x9f\xdd\xc2\xf5\x02\x26\xd7\x57\xd3\xf9\x72\x7e\x7d\x75\x0b\xd7\xef\x61\x7c\xf5\x09\x3e\xcc\xaf\xa6\x3d\x40\xaa\xf7\x28\x01\x3f\xa7\xd2\xe0\x2f\x24\x50\xc3\x48\x27\xbd\x5b\xc4\x0a\x02\x5b\xe1\x10\x52\x29\x46\x74\x4b\x23\x60\x84\xef\x32\xb2\x43\xd8\x89\x07\x94\x9c\xf2\x1d\xa4\x28\x13\xaa\x8c\x38\x15\x10\x1e\x9f\xbd\x02\x46\x13\xaa\x89\xb6\x77\x1a\x44\x0d\xce\x28\xdf\x8a\xd1\x19\x80\xa6\x9a\xe1\x08\xce\xef\xf1\xd7\x6c\x33\xbc\xc4\x64\x83\x52\xed\x69\x7a\x06\x10\xa3\x8a\x24\x4d\x0d\x88\x11\x2c\xf7\xe8\x17\x41\xb1\x08\x24\x2a\x91\xc9\x08\xcf\x00\x3e\xf7\xe3\x88\xf5\x95\x96\x59\xa4\xfb\x9c\x24\x38\x82\x0a\x34
\xf7\x7c\x4f\x54\x9f\x92\x64\x04\x5b\xc2\x14\x9e\xa5\x44\xef\x95\x41\x63\x87\xda\xfc\xd3\xb2\xe9\x36\xe3\x91\xf9\x65\x74\xd0\x0a\x71\x87\x46\xf5\xb6\x42\x26\x96\x3a\x20\x1b\x91\x69\x20\xd5\xdd\x00\x52\x22\x49\x82\x1a\xa5\x72\x80\xfb\xd0\x82\x94\xb9\x82\xf2\x8c\x40\xcb\x0c\xfd\xcd\x0a\x1a\x63\xd8\x66\x8c\x01\xe5\x4a\x5b\x55\x17\xdb\xfa\x76\x46\xe9\x8e\xa7\x12\x60\x17\xff\xed\x48\x88\x91\xa1\xc6\x53\x69\x70\xab\xff\x16\x28\x8f\x19\x7b\x2e\xd6\x8c\x9d\x8a\x77\x2a\xc5\x7f\x63\xa4\x1f\x43\x5a\x45\x7b\x4c\xc8\xc8\xff\x02\xd0\xc7\x14\x47\x60\x1c\x06\xdf\x55\x60\x31\x11\x59\x71\x7f\x03\x30\x46\xd5\xc9\x76\x62\xd6\xb6\x68\x59\x42\xf8\xf1\xff\x24\xf5\x26\xd6\x08\x8e\x5c\x5b\x24\xdd\x52\x8f\x6f\x41\x4e\x78\xd7\xbb\xb5\x86\xda\x39\x2f\x44\xe3\x9c\x36\x35\xfc\xf3\x4f\xff\xe7\x97\x2f\xc3\x80\xa4\xb9\x1b\xfe\xfe\xf2\x65\x98\xe4\x60\xcc\x03\x43\xcf\x97\x2f\x15\x80\x99\x42\xd5\x57\x9a\x68\xec\xef\x29\xd7\x15\xa2\xdc\x8a\x94\x48\xe4\xba\x1f\x09\x6e\x42\x21\xca\x3a\x73\xdd\x2a\x46\x36\xc8\xd4\x08\xdc\xbf\x95\x47\xc6\x73\x46\x12\x89\xc6\x16\xe8\x35\xb7\x5a\x7e\x24\x91\xc4\x7d\x4d\x13\x14\x99\x1e\xc1\x9b\xca\x33\xeb\x89\xba\x1e\x3a\x63\x69\x3e\x75\x42\x11\x9b\x36\xcd\xf0\xbf\x9d\xcc\xf3\x1f\x55\x4a\xfb\x75\x55\x48\xa5\x48\x51\x6a\x8a\xaa\x90\x3c\xc9\xf4\x5e\x48\xaa\x8f\xc5\xad\xd6\x9d\x0b\x74\x77\xc2\x47\x9d\x71\x78\xb5\x6d\x89\x03\x51\xa8\x45\xdb\xe2\x8a\x69\x7d\x7f\x6d\xff\x20\x6c\x00\xbf\x8a\x83\x4d\x22\x62\xe4\x9a\x6e\x8f\x70\x10\xf2\x9e\x09\x12\x2b\xd8\x4a\x91\xb8\xfc\xa8\x80\x3c\x28\x41\x84\x3c\xa6\xc7\x22\xca\x12\xe4\x2e\x30\x9b\x5c\xe2\xa3\x07\x02\x73\x0b\x56\x1f\x6d\xb8\x4f\x84\x44\x88\x51\x13\x6a\xb4\xc1\x64\x45\x6a\x34\x1c\x46\x4c\x64\xf1\x60\x67\xf3\xbf\x41\x24\x92\xe1\x7d\xb6\x41\xc9\x51\xa3\xea\x23\xdf\x51\x8e\xc3\x58\x44\x6a\xb8\x17\x87\xbe\x16\xc3\x80\x5f\x9f\x7a\xd0\xdf\x97\x50\x6a\x63\xba\xb9\xc2\xda\x1b\x29\x1e\x68\x8c\xb2\xfa\xb4\xc5\xbe\x3b\xe5\x30\xaf\x41\xaa\xad\x36\x7a\x79\xcd\xd9\xb1\xa2\xcd\xad\x22\xb8\xce\x74\x9a\x69\x10\x9c\x1d\x07\x30\xe6\x39\x8a\x86\x06\x0b
\x19\xf4\x9e\x68\x90\xb8\x65\xc6\xa0\x2d\x9f\xd7\x54\xa9\x0c\xe5\xba\x06\x17\x4c\x0a\x68\x9e\x07\xde\x94\x80\x09\xc1\xaa\x32\x73\x20\xbe\x81\x01\xf6\xfd\x47\x29\xcb\x75\x6b\x0c\xbf\xdd\x5e\x5f\xc1\x47\xdc\xc0\x52\xdc\x23\x87\x1f\x7e\xfb\xb8\x7c\xed\x51\x80\xd5\x62\x3e\xc8\x49\x82\x24\x53\xba\x41\x97\xd2\x44\x6a\x97\x84\xaf\x83\xbe\xac\x4d\x2a\x08\x1b\x13\xa6\x1f\x08\xa3\x31\xac\x16\x17\x6e\x09\x43\xbe\xd3\x7b\xf8\xf7\xbb\x37\x6f\xde\x40\xb4\x27\x92\x44\xc6\xfb\x0f\x1a\x60\xe7\x5b\x50\xa8\x7b\x86\x67\x3c\x54\x1e\x07\xca\x98\x89\xa1\xe2\xe0\xc1\x5e\xcf\xa7\x13\xd0\x06\xed\xb2\x29\xd0\x36\xf2\xc1\x26\x40\x99\x01\xa7\x69\x44\xb4\xab\x1b\x6a\x42\xb9\x0b\x42\xb9\xb3\x42\x71\xf0\x63\xaa\x22\x93\xee\x1e\x1b\x10\x2d\x3e\x1b\x34\x39\xb0\x09\x76\x2e\x47\xb7\x28\xac\x16\x73\xb3\x9f\xc5\xd2\xec\x55\xc5\x11\x3d\x8a\x4d\xaa\x27\x0c\x89\x91\x6f\xc1\xf3\x98\x2a\x53\x52\xa8\xa6\xbd\x96\x04\x13\x11\x6e\x8a\xa5\x0d\x42\x4c\x25\x46\x9a\x35\x71\x4d\x44\x4c\xb7\x14\xe3\x7f\x01\xd5\x56\x90\x66\x75\x64\xb6\xc3\x18\x7e\x30\xe2\x6a\x3a\x04\xbf\x77\xfc\x1a\x36\xb8\x15\xb2\x6e\x2b\x00\x99\x32\xb8\x12\xe0\x78\x08\x1a\x63\x41\x49\xec\x23\x27\x1b\x66\x9e\x36\xc0\xbe\xae\x92\x1d\x78\x9f\x5b\xad\x10\xec\xeb\x55\xff\x63\x0b\xb4\x97\xb1\x7f\x93\xea\x98\x3d\x42\x15\xd9\x6e\xc8\x40\x79\x53\x4d\xf6\x34\xda\x17\xc2\x0a\x5a\x23\x31\x12\x3b\x4e\xff\x83\xb1\x05\x2e\x6d\xc1\x47\xc0\xb0\x94\x61\x93\x6d\x0d\xb0\x86\x34\xa3\x7a\x60\x4a\x23\xeb\x88\xa8\x02\xb5\xb7\x02\xdd\xa0\x3e\x20\xf2\x5a\xc2\xa9\xdc\xb2\x0d\x32\xc1\x77\xa0\x45\xd3\x44\xcc\xe3\x5f\xb3\xcd\x00\xde\x9b\xf2\xd5\x42\xde\x0b\x65\xaa\x4f\xca\xe1\xcf\x9b\xc5\xf5\x6f\xb3\xc9\xf2\x6e\x3e\xfd\xd2\xab\xf2\xc0\x92\xee\x12\xbe\xa6\xe3\x53\xb0\x2e\xbf\x3a\xd8\x67\x9b\x01\x75\xe1\x64\xdd\x03\xc2\xf4\x5e\x64\xbb\x7d\xb0\x5d\x50\x99\x8d\xb3\xc6\x7e\xa2\x3d\xe1\xbb\xa6\xda\x51\x6e\x34\x0e\x25\x3c\xb8\x46\x82\x72\x32\xa1\x0a\xc6\x37\xf3\x42\xbb\x5c\xd2\xb2\xa4\x09\x36\x63\x79\x43\x97\x1c\xf2\x23\x30\xe6\x6a\x53\x8f\xee\x38\x3f\xc9\xe1\x9e\x9d\xa0\x55\xdd\x1a\xf5
\xd1\xb8\x36\xc3\xc6\x52\x5d\x7b\x20\xca\xe3\x1d\x0f\x2a\x18\x94\x22\x2e\x4d\x92\x4c\x1b\xc3\xac\xed\xe5\xf2\xa6\x97\xa7\x77\x9a\xc3\xfd\x6b\xe8\x75\x78\x7f\x0d\xbd\xc5\x4e\x4f\x13\xdc\x20\x2a\x7f\xb9\x33\x03\x2b\xad\xc9\x35\xac\xc8\xc8\x7b\xae\xd3\xe1\x6a\x9e\x9f\x7f\xea\x8c\x66\x97\xc6\xd9\x26\x44\x47\xae\x4b\x25\x71\x87\x9f\x47\xb0\xfe\x71\x0d\x4b\xd7\xd8\x42\x16\x1b\xbd\x4f\x25\x2a\xe4\xda\xa6\x61\x0c\x77\x24\x3a\x42\x9a\xc9\x54\x28\x54\x83\x22\x81\x42\x1e\xa7\xc2\x64\xfa\xcf\x4e\x4f\x67\xfe\xcd\x53\xb2\xd3\x96\xb5\x1d\xf9\x43\x58\x59\x29\xf8\xb4\x30\xfa\x11\xed\xcb\x2c\x1b\x9c\x90\x03\xee\xee\x71\xc2\x32\xa5\xbb\x92\x9f\x06\x89\x2d\x64\x9e\xe7\x30\xba\xd6\x75\x11\xdb\xf9\x66\x07\xe9\xe7\x1f\x66\xfd\xbc\x35\x56\x22\x7f\x00\xc6\x30\x72\x79\x52\xa7\x39\x0d\x2f\x56\xb2\x02\xeb\xf8\xcf\x3f\xcc\x20\x72\xfb\xd7\x13\x83\x2e\x7e\x81\x35\x44\xd7\x09\xbb\xa0\xfc\xbe\xf9\xf4\xd1\xd8\xd9\xc2\xbd\x45\x09\x5a\xcb\xea\xaa\x7d\xcc\x83\x61\x0e\xe0\x16\xd9\xb6\xcf\x28\xbf\x0f\xe1\xf1\x7c\x72\x93\xa3\x96\x37\x12\xcf\x3f\xcc\x5a\x80\x42\x4e\xb6\x0d\x3b\xf8\x99\x24\xa9\x31\xf6\xe1\x30\x2f\x5a\x7d\xed\x41\x52\xaa\x6c\xfd\x91\xd7\xd0\xc9\xb1\xef\xff\x2e\x95\xd0\x99\xea\x1f\x50\xe9\xb7\x7d\x32\xf4\x90\xed\xc2\xa8\x55\xb8\xee\xfa\x2f\x23\xd2\x80\x87\x02\x22\x11\x08\x53\x02\x54\x96\xa6\x42\x1a\xef\xf4\x7d\x27\xf3\x24\x6e\x51\x22\x8f\xda\xa4\x63\xaa\xce\xc0\x86\x11\x4c\x02\x3d\xc3\x76\x45\x73\x97\xf5\x08\x23\x50\xc8\xb6\x0d\x31\x14\x6e\xf1\x12\x35\x89\x89\x26\x5f\x6f\x29\x1f\x1a\xb0\x9e\x6b\x31\x4f\x42\xf8\xca\x5c\x6b\xa5\x70\x9b\xb1\x12\x82\x85\x9d\x25\x7e\xa3\xe7\x98\x48\xc1\xb4\x71\x4a\x6f\x51\x3e\xa0\xf4\x47\x10\xdf\x6e\x30\x1f\x3a\x61\xb7\xbc\xfb\x28\x3b\x1e\x67\x49\xb1\x8f\x49\x72\xc0\x1e\xb9\xe4\x09\x90\xc7\x16\x48\xd3\xcb\xb8\x6d\x9d\x0e\xc3\xe6\x08\xeb\xa1\x7f\x67\xdd\xac\x3e\x12\x4c\x84\x3c\x5e\x6e\xba\xb9\x42\xb9\xc6\x5d\xab\xda\x86\x7c\x82\x72\xfd\xf3\xff\x7b\x9a\x6d\x97\x7e\xab\x97\x65\x92\xc9\xd1\xb5\xd0\x84\x79\x52\x20\x22\x29\x89\x4c\x6e\x4e\x54\xce\x85\x56\x16\x6d\x8e\xee
\xa4\x23\x4b\x6c\xbb\x97\x95\x95\x0f\xb8\x88\x51\xe5\x66\xac\x7a\x10\xe3\x96\x72\x97\x14\x5f\xfe\xd2\xe4\xa3\x59\x3f\x11\x59\x35\x48\xff\x45\x8c\xbc\x0a\x7b\xbd\x2c\x27\x0d\x58\x88\x0c\xdc\x32\xeb\x0c\x9b\xea\x7c\xe9\xd0\x38\xcf\xab\x76\xe6\x84\xce\xcc\x3c\xfe\x76\x03\xbc\xaa\xc0\xfb\x0b\xb8\x10\x9a\x3d\xf3\x69\x9d\x15\x46\x63\xb6\x54\xb6\x34\x45\x02\xa1\xa1\xeb\x63\x5b\xe2\x62\xeb\x35\x49\xb8\x9b\x25\x4e\x86\xac\xce\xa4\x0e\xa5\xfb\xad\x70\x53\x46\xb4\x51\x12\x5f\xcd\xf9\x08\x05\xff\x41\x29\xfa\x76\xcf\x3c\x8a\xfd\xc0\xe8\xbd\x0d\xba\x7d\xc1\xfb\xe7\x93\x9b\xd7\xb6\x6a\xeb\x44\xf6\xce\xc9\x3b\xd4\xa7\x06\xa0\xed\xe3\x18\x5c\xed\xf3\xc0\x89\x3b\x1a\xe7\xab\x30\x49\xf5\xb1\x29\xe5\x2c\x8d\x5b\x6a\xaf\x13\x25\xfc\x58\x6d\x12\xae\x9a\x16\xac\xf2\xfd\xfe\x02\x8f\x42\x13\x04\xa2\x7d\x1d\xaf\xf7\xa8\xf2\x16\x29\x1c\x4c\xdd\xce\x48\x87\x0a\x38\x2e\xd8\xfa\x9e\x2a\xff\xeb\xce\x82\xb3\x67\xbb\x5b\x9b\x37\xe8\xa2\x2d\x54\x44\xd8\x3e\xc3\x07\xac\x77\x2e\xca\x40\x1d\x18\x45\x79\x84\x79\x0e\x3e\xf5\x48\x99\xec\xc5\x6f\x6d\x1d\x8d\xe4\x84\x31\xd7\xdf\x1d\xdf\xcc\xdb\x33\x30\xc1\x55\x96\xb4\x76\xe2\x1e\xa2\x34\xfb\xdf\xf2\x66\xbf\x87\xbd\x5e\x56\x8a\x0f\x93\x9b\xd5\xcb\x7b\xb3\x22\xad\x08\x59\xf3\x4b\xe4\x62\x8b\xe2\x64\xbb\x75\xfd\xd3\xb9\x58\x07\x84\xae\xd3\x05\xa3\xe2\x94\x87\xec\xb8\xcc\x91\x00\x28\x78\x9a\xbd\xc8\x58\xd3\xc3\x6e\xd0\x8d\x3e\x60\x6c\x35\x8c\x40\x24\xa4\x6b\x42\x9a\x5a\x97\x1a\xa0\x18\x07\xa7\xd4\x0b\x1e\x51\x69\x24\xf1\x11\xec\xd1\x55\x53\xe7\x96\xd6\xc8\x72\xbe\x8f\xe0\x47\x98\x71\x95\x49\x74\x98\x18\x00\x01\x5f\xaa\x00\x3f\x9b\x1f\xf4\x01\x2b\x5b\xb6\x35\x80\x05\x47\xeb\xd2\x8c\x5a\xd8\x1f\xd5\xb1\x82\x01\xfc\x08\x37\x52\xa4\x64\x47\x74\xa9\x05\x67\x3b\x6d\xf3\xa2\xc0\x6b\x40\x26\x0f\x84\x32\x3b\xf9\x41\x1b\x1d\x8e\xfc\xa0\xc7\xe5\xf5\x83\x82\x14\x97\xb2\x02\xe5\x54\x53\xd2\xb4\xf5\x48\xf0\x2d\xdd\x65\x92\x84\xfe\x43\x8c\x5b\x92\x31\xdb\xa0\x83\xf7\x48\x74\x26\x2b\x9d\x01\x77\x3d\x96\x07\x47\x82\x73\x8c\x74\x2e\xd6\x6e\x83\x26\x52\x92\x66\xc7\xb1\xd9\x09\xab
\x01\x7c\x79\xdf\x5b\xd2\xc6\x5c\x1b\xac\x9a\xd9\xd3\x79\xc6\xda\x03\x08\x84\x6a\x33\x60\x08\x64\x67\x1c\x6d\xb5\xe1\x62\x95\x20\x15\x69\xc6\xbc\xa7\xac\x89\xae\xc3\x17\xe8\x4c\x9a\xdc\xcf\x3a\x6d\x02\x2a\x8b\x22\x54\xca\x94\x2b\x4c\xf0\x5d\x5f\x66\xdc\xce\xc5\x18\x29\x38\xc9\xd9\x85\xae\x67\xf8\x04\x6c\x21\x7d\x1c\x2b\x6b\xe4\xdc\xf6\x75\xb9\xd0\x25\x54\xe3\xcc\xa6\xfb\xdc\x28\x24\x83\x73\xd4\x4f\x03\xbe\xa0\x4a\x97\x3b\xc1\x12\xff\xc8\x50\x69\x35\x80\xa5\x9b\x6c\x71\xbd\x29\x5f\xb0\x27\x84\xd3\x2d\x2a\x0d\x64\xdb\x55\xa5\xda\x63\x0c\xa7\xba\xde\xe8\x1c\xbd\xae\x33\x1c\x11\xc6\x50\x7a\x97\x01\x09\xb9\x47\x20\x0d\xe2\xda\x23\x91\x49\xbe\xed\x59\x11\xe1\x2e\xbb\xf0\x12\x4b\x88\xba\x6f\x7a\x8a\xfc\x00\x9b\x2a\xed\x7d\xa3\xf9\xb3\x65\x1d\xd5\x98\xb4\xaa\xfc\xa3\x7e\xba\xbc\xc7\xb3\xbc\xef\x09\xb6\xf1\xb8\xb5\x7a\x76\x38\x37\x77\x1b\x89\x14\x5b\xb3\xe5\x32\x05\x1b\x21\x18\x92\xb6\xca\xb3\x46\x86\x37\xdf\x32\xec\xce\x97\x2a\x06\xfa\x71\x8f\x76\x80\xac\xa2\x2d\x3e\x2f\xcc\x4d\x28\x68\x4f\x27\x48\x77\x28\xe0\x49\xbb\x53\x76\xff\xf5\x00\xe6\x5b\xc8\x78\x38\xf5\x2b\x74\x90\x2a\x20\xca\x64\x27\x71\xdb\x99\x45\x71\x6d\xdc\x01\x8d\x4a\x49\x84\xe0\x80\x0e\x6a\x26\x6f\x47\x54\x8c\xfb\x58\xcc\x6e\x97\x90\x90\x34\xed\x72\x20\xee\x3a\xec\x91\xbb\xb1\x29\x63\x6f\x15\xa2\xdd\x58\x61\x7b\xbf\x2e\x5c\x81\x84\xa7\xe4\xd6\x99\x09\xbb\xab\x5e\x3b\x3f\xc5\xde\x8a\xc4\x3e\x8d\x2f\x2f\x0a\x5e\xfa\xee\x5c\xa0\xa2\xb5\x07\xe0\x55\x7b\x22\x2f\x1f\x41\xff\x79\x45\xda\x65\x0b\xd4\xa7\x62\xc1\x9c\x57\x43\x81\x25\x44\xa2\x6f\xab\x92\xa2\x2b\xff\xa4\xcf\x9e\x2c\xea\x7a\x40\x77\x5c\x48\xaf\x0a\xa5\xbe\xab\x32\x02\x97\x2e\x2b\x88\x08\xb7\xc1\xab\xd3\xf9\x4d\x16\xf9\x69\xeb\x00\x26\xd6\xdf\xa9\xe0\xf0\xbc\x45\x84\x75\xe1\x74\x2e\xca\xa4\x49\xf8\x5b\x8e\x67\xc1\x3a\x03\xdf\x30\xe6\x95\x04\xc7\x3b\xfb\x7a\x04\x69\x8b\x16\xbd\x2e\xef\xcf\x90\x3c\xe4\x83\xb4\x86\x07\xce\xb1\x52\x53\x91\x72\x04\xfc\x4c\x5d\x28\x70\xe8\x96\x2d\xcf\x58\x4c\x3b\x0b\xca\xc7\xdb\x25\x74\xf7\xc4\x45\xab\x0d\x22\x2f\x27\x80\xde\xa5
\x0b\xeb\x3e\xba\xa6\x55\xca\xca\xe3\x7b\xcb\xfd\x8c\x1b\x31\xb4\x9c\xff\x84\xab\x50\xd9\x97\xcb\x6c\x2e\x9b\x30\x5f\x36\xb9\x19\xc7\x31\x75\x19\x78\x7b\x8e\x63\x55\x86\x63\x37\xf3\x4b\x69\xb7\x16\x15\x09\xd8\x98\x5d\x4e\x41\xed\x81\xa2\x0d\xcd\x26\xf7\xed\x0e\xe9\xf8\x80\xf2\xe8\x2b\xc7\xba\xc5\x3c\x95\x2c\xe5\x89\x51\x7b\x4b\xfb\x9f\x64\xe9\x9f\x64\xe9\x91\x64\xe9\x34\x73\xfb\x27\x5f\xfa\x27\x5f\xfa\xbb\xe5\x4b\xe1\x89\xeb\xa7\x3c\x12\x78\x3a\x0d\xa6\xe3\x04\xd6\x03\x7c\xec\x85\xd3\xed\xeb\x69\xa0\x1d\x67\xdc\xfe\x05\xab\x03\x2d\x71\x0a\x76\xc8\xbd\xeb\x6e\x93\xe9\x53\xe6\xea\x1b\x12\x8f\x1c\xc6\xc1\x69\x32\x6f\xef\x4b\x74\x1f\xc4\x3d\x42\xb1\xcd\x81\x2a\x5d\x83\x70\xc8\xa6\x85\xfd\x1c\x69\x2b\xda\x7d\x7c\x89\xa0\xbb\xa2\x63\x08\x53\xd7\xb5\x51\x21\x4a\x9b\xa0\xa4\x74\xa5\x39\xe1\x37\xe8\xb2\x0a\xdb\x0e\xcb\xb1\x08\xb3\x7b\xa4\xc8\x26\x8b\x53\xea\xb0\xaa\x07\x62\xa3\x04\x43\xdd\xde\xed\x37\x57\x3e\x3b\x55\xcc\xa3\x19\x05\xad\xce\xe1\x94\x96\xbf\xdd\xa0\x26\x6f\x27\xb2\xd3\xa9\x9e\xe2\x52\xeb\xed\xde\xb7\xbf\x78\xa0\xcf\x93\xd1\x4a\x21\xac\x49\x4a\xf1\xb3\x46\x6e\xa9\x18\x7a\xfc\xd6\xb6\x31\x84\x24\x06\xb1\xed\x24\xbd\xf1\xea\xda\xea\xf7\x24\x53\x5a\x24\xc1\x54\xa6\xb8\xb5\x11\x5b\xf0\xc7\x4e\xb3\xc2\x65\xdd\xa6\x70\xa3\x42\x3e\x8e\x6f\x10\x14\xba\x71\x9e\xa2\xbc\x30\xa1\xba\x64\x47\x24\xa5\xee\x24\xf7\x69\x31\xfd\xfb\xed\xe0\xed\xcf\x05\x02\x06\x7f\xc9\x09\xab\x1e\xa1\x9d\x36\xfb\x34\xcb\xdf\x3d\xeb\x60\x75\xa9\x3d\x3c\xe6\xf9\x5e\xec\xd8\xf7\xf6\x8e\xb1\x4d\x25\x13\xc2\xc9\x0e\x63\x98\x4f\xfd\x88\x47\x6d\xcc\xa5\x9c\x9a\x59\x0e\xcd\xa7\xf6\xb3\xbc\x0d\xe6\xf3\xa8\x3e\x5b\x2d\x52\xd4\x4d\xa6\xf3\x51\x40\x93\xc8\x49\x8c\x44\x92\x20\x8f\xeb\x9a\x69\x8c\xc2\xc0\xeb\x1c\xaf\x9a\x6f\xeb\xa3\xf1\x45\xf9\xa8\x80\x74\x9d\xb3\xe5\x9d\x6a\xfb\xf2\x03\x61\x19\xd6\x44\xea\x2d\x79\x35\x9f\x86\xf8\xb0\xbe\xcf\x36\xd8\x57\x47\xa5\x31\x59\x17\x51\xb5\x02\xd7\x05\x80\x52\xc3\x96\xf2\xad\x24\xee\xbb\xb5\x4c\xe2\xd2\x88\xee\xd9\xa2\x9c\x37\x60\xb4\xad\xad\x07\x8a\xe6\x5b\x33\x9e\x25
\x27\x28\x83\x3b\x2b\x28\xbf\x6c\xf1\x6c\xf0\x99\x2a\xf0\xb9\x7d\x95\x05\x7c\x00\x37\x42\x29\xba\x61\xe8\x38\xab\x46\x30\xbf\x7a\xbf\x18\xdf\x2e\x17\xab\xc9\x72\xb5\x98\xdd\x2d\x3f\xdd\xcc\xee\x56\x57\xb7\x37\xb3\xc9\xfc\xfd\x7c\x36\xed\xc1\xf5\xd5\xdd\xcd\x62\x76\xd9\x83\xcb\xd5\xc5\x72\x7e\x37\xb9\xb8\x5e\x4d\xcb\x6d\x6f\xe4\x59\x52\xe6\x5c\xff\x29\x90\x95\xb5\x1e\x7a\xe5\x5e\x69\xa3\xfc\xbe\xff\x42\xa6\x21\xa1\x46\x5c\x27\x79\x65\x77\xd3\x11\xfe\x4e\x13\xed\x45\xf9\x53\x9c\x86\x50\x4a\x63\x68\x93\x1b\x8f\x5c\x6e\x84\xa5\x92\x78\x50\x22\x40\x69\x1f\x71\xa8\xe0\x2f\x3f\x26\x7a\xd1\x80\x5f\x5a\xfb\x15\xe3\xa2\xef\xcb\x8e\xd3\x4d\x9a\x7b\xf0\x2e\xdf\x35\xd8\x28\x4d\x92\xd4\x1b\x61\x85\xc5\x89\x50\xd6\x75\x98\xf8\x1d\xe5\x48\x81\x79\x61\xc3\xa8\xda\x87\x86\x84\xff\xb2\x60\x62\x3f\x70\x71\x3e\x2a\x9c\xcf\xfa\x13\xd4\x0a\x58\x57\x21\x2b\xf3\x0f\x61\x90\x50\x9e\x69\x54\x3d\xeb\xa7\x22\xc1\x35\xe5\x99\xc8\x54\x03\x79\x5b\xcb\xc7\x22\x7c\xd0\x5c\x81\x58\xca\x03\x7a\x20\xfc\x17\x25\x7b\xf2\x80\xc0\xcd\x2e\x01\x79\x8c\x4b\xc5\x33\x3b\xf6\xca\xbd\x1c\x13\xc0\x2b\x30\x37\xe8\xea\x82\x67\x4f\xd4\x86\x11\xba\xe7\xfb\xa1\x8b\xea\x97\x55\x0d\xc9\x1a\xc7\x11\xa0\xe7\xe3\x80\xb2\x79\x50\x79\x12\x9a\x76\xcb\x67\xa3\x78\x45\x92\x4e\xc5\xfb\xbe\x71\xf0\x64\xd8\xdc\x83\x8c\xd3\x3f\xb2\xf2\xdc\x7f\x3d\x9c\xb4\x9c\xb4\xfa\xa2\xce\x59\x0e\xac\xf3\x51\xc5\x1f\x4b\x13\x8a\x3f\x56\xbf\xec\x2b\x7e\xdc\xd1\xf8\xcb\xba\x67\xa2\x4c\x05\xa6\xef\x61\x84\x28\x39\x80\x75\xe5\x95\x75\x29\x2b\x74\x5f\xc6\x2c\xde\x4f\xe0\xed\xdb\x77\x3f\xe5\x1f\xca\x6b\x98\x5e\xdd\x56\x60\x5a\x9f\x31\x82\xb7\x03\x18\x6b\x67\x2f\x95\x21\x66\x43\x87\xff\x5a\xe7\x9d\x6d\xac\xd8\x2d\x22\xc1\x95\x9f\x65\x61\xe2\x50\xcb\x5b\x22\xa2\x10\x08\x4b\xf7\x84\x67\x09\x4a\x1a\x95\xc1\x09\x09\xeb\xfe\x1a\x7e\x2a\x60\xb9\x2f\x87\x4c\x1e\x81\x3c\xef\x0f\x56\xde\xaf\x42\x0f\xb0\xe0\xa3\x1d\xc4\x88\x08\xb7\x03\x28\xee\x53\x76\x93\x49\xa8\x4a\x06\xf0\xfa\xff\xaf\x7b\x1e\x2a\x24\xe4\x33\x4d\xb2\xc4\x13\x54\x8d\x49\xdb\xda\xf0\xf6\xf7\xcf\x55
\x47\x2f\xe1\xe7\x6b\xe4\x4d\xe5\xf3\xc4\x56\x9b\xf1\xb0\xbf\xd9\x64\x1e\x9f\x55\xad\xcc\xa8\x1a\x57\x18\x7e\xba\x04\x4f\x0e\x9b\x98\xe6\x63\xaa\x9c\xd4\x66\x6e\xdc\x07\xa7\xb5\xfd\xed\x39\xff\xf3\x67\xd6\x6f\xcd\x6b\xa7\x64\x34\xf5\x85\x5f\x11\x6f\x2c\x88\xe6\x89\x42\x6b\xed\x7f\x22\xcf\xbb\xea\xdf\x48\xc4\x1d\xb3\x22\xa7\x7c\xe3\x34\x11\xf1\xc9\xd3\x21\x96\x28\xf3\x42\x2d\xc1\x7b\x94\x47\x0d\x3e\x35\xdd\xa3\xaf\x3d\x9d\x5c\x4f\x64\x9a\xbb\x1a\xf9\xdf\xe4\x7a\x5a\x4b\xf8\x26\x8b\xd9\x78\x39\xbf\x3a\xef\xc1\x62\x36\x9e\x7e\xea\xc1\x74\x76\x31\x73\x37\x56\x37\x53\xf7\xa8\x01\xf7\x76\xb6\xf8\x7d\x3e\x99\xdd\x85\x15\xf5\xc1\x88\x13\x45\x06\x2d\x09\x25\x58\x13\xa9\xe3\xd9\x5c\xe0\xd1\x6e\x3c\xb0\x54\x34\xee\x06\xa2\x1a\x0f\x02\x05\x8d\x07\x75\x12\xf3\x05\x2e\x4c\x7d\x4d\x29\xb8\xf2\x6f\x7e\x9b\xe9\xb8\x2c\xaa\x54\x1c\xae\x56\xa5\x92\xb0\x50\x06\x9b\x60\x55\x83\xa5\x0a\x31\x96\x44\x52\x28\x55\xfb\x62\xad\x54\x7a\x9b\x72\x8e\xb4\x3d\x82\x1a\x48\xff\x39\x91\x3b\xeb\xf0\xe7\x4d\xf9\xda\xfc\x3f\x8b\x51\x26\xa4\xdb\xb8\x4e\xf3\x2f\xae\x7a\x40\x35\xec\xd0\x14\x87\x55\x88\xf9\xa4\x9e\xc3\xf5\x8e\x3e\xff\x63\xa5\xf6\x81\xc8\x6f\xcf\xba\x5b\x07\x1f\x5f\xf0\xe3\x2c\x53\x35\xe4\x73\x8c\xa7\x12\xfd\x3f\x01\x00\x00\xff\xff\x16\x82\x89\x81\x3b\x48\x00\x00"), }, "/iam": &vfsgen۰DirInfo{ name: "iam", @@ -509,9 +509,9 @@ var Assets = func() http.FileSystem { "/identitytoolkit/oauth_idp_config.yaml": &vfsgen۰CompressedFileInfo{ name: "oauth_idp_config.yaml", modTime: time.Time{}, - uncompressedSize: 4239, + uncompressedSize: 4236, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\xdb\x6e\x1b\x37\x13\xbe\xd7\x53\x0c\xac\x0b\x27\x80\x0e\x86\x2f\xf5\x5f\xe9\x97\x9d\x76\x51\x43\x0a\x2c\xa5\x41\x80\x02\x05\xb5\x9c\x5d\x4d\xc3\x25\x59\x72\xd6\x8a\x1a\xf8\xdd\x0b\x92\xab\x48\xab\x43\x6a\x35\x41\x11\xde\xd8\x22\xe7\xf0\xcd\x81\x1f\x67\xbb\x30\x31\x76\xe3\xa8\x5c\x31\xdc\xde\xdc\xde\xc2\x4f\xc6\x94\x0a\xe1\xe1\x61\x32\x80\xb1\x52\xf0\x18\x8e\x3c\x3c\xa2\x47\xf7\x84\x72\xd0\xe9\x42\xa7\x0b\x0f\x94\xa3\xf6\x28\xa1\xd6\x12\x1d\xf0\x0a\x61\x6c\x45\xbe\xc2\xed\x49\x0f\x7e\x45\xe7\xc9\x68\xb8\x1d\xdc\xc0\xab\x20\x70\xd5\x1c\x5d\xbd\xfe\x5f\xa7\x0b\x1b\x53\x43\x25\x36\xa0\x0d\x43\xed\x11\x78\x45\x1e\x0a\x52\x08\xf8\x29\x47\xcb\x40\x1a\x72\x53\x59\x45\x42\xe7\x08\x6b\xe2\x55\x74\xd3\x18\x09\x38\x3e\x34\x26\xcc\x92\x05\x69\x10\x90\x1b\xbb\x01\x53\xec\xcb\x81\xe0\x84\x38\xac\x15\xb3\x1d\x0d\x87\xeb\xf5\x7a\x20\x22\xda\x81\x71\xe5\x50\x25\x49\x3f\x7c\xc8\x26\xf7\xd3\xf9\x7d\xff\x76\x70\x93\x74\xde\x69\x85\xde\x83\xc3\x3f\x6b\x72\x28\x61\xb9\x01\x61\xad\xa2\x5c\x2c\x15\x82\x12\x6b\x30\x0e\x44\xe9\x10\x25\xb0\x09\x88\xd7\x8e\x98\x74\xd9\x03\x6f\x0a\x5e\x0b\x87\x9d\x2e\x48\xf2\xec\x68\x59\x73\x2b\x5d\x5b\x7c\xe4\x5b\x02\x46\x83\xd0\x70\x35\x9e\x43\x36\xbf\x82\xff\x8f\xe7\xd9\xbc\xd7\xe9\xc2\xfb\x6c\xf1\xf3\xec\xdd\x02\xde\x8f\x1f\x1f\xc7\xd3\x45\x76\x3f\x87\xd9\x23\x4c\x66\xd3\xbb\x6c\x91\xcd\xa6\x73\x98\xbd\x81\xf1\xf4\x03\xfc\x92\x4d\xef\x7a\x80\xc4\x2b\x74\x80\x9f\xac\x0b\xf8\x8d\x03\x0a\x89\x4c\xd5\x9b\x23\xb6\x00\x14\x26\x01\xf2\x16\x73\x2a\x28\x07\x25\x74\x59\x8b\x12\xa1\x34\x4f\xe8\x34\xe9\x12\x2c\xba\x8a\x7c\x28\xa7\x07\xa1\x65\xa7\x0b\x8a\x2a\x62\xc1\x71\xe7\x28\xa8\x41\x87\x74\x61\x46\x1d\x00\x26\x56\x38\x82\x4c\xa2\x66\xe2\xcd\xc2\x18\xf5\x91\x78\x38\x1b\xd7\xbc\xca\xa4\x9d\x18\x5d\x50\xd9\x01\x90\xe8\x73\x47\x36\xd8\x1b\xc1\x62\x85\x87\x1a\xd0\xd6\x00\x87\xde\xd4\x2e\xc7\x0e\xc0\xa7\xbe\xcc\x55\xdf\xb3\xab\x73\xee\x6b\x51\xe1\x08\x8e\xcc\x27\x99\x95\xf0\x7d\x12\xd5\x08\x0a\xa1\x3c\x76\xac\xe0\x95
\x0f\x20\x4b\xe4\xf0\xe7\x04\x8a\xa2\xd6\x79\xf8\x15\x3a\x34\x96\xb8\xc4\xd0\x98\x85\x71\x55\x8c\x1d\xc4\xd2\xd4\x0c\xe2\xd8\x23\x80\x15\x4e\x54\xc8\xe8\x7c\x32\xde\x87\x33\xe0\xc2\xda\xb6\xd8\x08\xd8\xd5\xd8\x6c\xb6\xe0\x8c\xa1\xa8\x95\x02\xd2\x9e\xe3\x85\x30\xc5\x29\xb7\xa1\x3d\x37\x2f\x0d\x26\x0a\xff\xd0\xe1\x48\x54\xc8\xf8\xd2\x78\x92\xf4\x0f\x07\x7f\xac\xd4\xa5\x11\x28\x75\x49\x0c\xd6\x99\x3f\x30\xe7\xaf\x81\xf7\xf9\x0a\x2b\x31\x6a\x7e\x01\xf0\xc6\xe2\x08\x02\xed\xe8\x60\x5b\x91\x7f\xf1\x15\x08\xb2\x27\x9a\xa6\x12\x7a\xf3\x5f\x82\x0e\x6f\x83\xd1\xa8\x39\xda\x4d\xa2\x8d\x8b\x36\x8a\xad\x7e\x43\x45\x27\x0b\x9e\xf8\x81\xe4\x17\x58\x7e\xf8\xf9\x73\xf3\xef\xf3\xf3\xd0\x88\x7d\x9d\x70\x16\x62\x78\x7e\x6e\x69\x5b\xe1\x50\x73\x3f\x37\x3a\xbc\x46\xe8\x0e\x43\xdc\x71\x50\xee\x50\x30\xb6\x42\x3d\x49\x50\xfb\x47\x0e\x85\xec\x33\x55\x68\x6a\x1e\xc1\x4d\xeb\x2c\xde\xe3\x73\x87\xa9\xa5\x8e\x4f\x53\x32\xcd\xf2\x54\x11\x9a\xdf\xfd\x83\x08\xac\x33\x16\x1d\x13\xfa\x5d\x4d\x72\x45\xa8\x39\x93\xbb\x9d\xa3\xe6\xda\xae\x84\xa7\x34\x0d\x49\x4f\x1a\xcd\x3d\x89\xa3\xe6\x4b\xd6\x81\x64\xbc\x60\x3a\x15\xaf\xd9\x1d\x1c\x60\x98\x63\xee\xb6\x44\x7e\x39\x8e\xa4\xfd\x02\x2c\x3e\x0a\x6e\xa7\x8c\x7d\x40\xbd\x70\x3b\x50\xc7\xd9\x60\x96\xdd\x4d\x20\x37\x12\xf7\x2c\x02\x14\xca\xac\x07\x47\x48\x3c\x6a\x4f\x4c\x4f\xed\x8e\x80\x30\x16\x58\x25\x36\xd3\x80\xf2\xe2\xa8\xee\x76\xca\x5f\x0d\x2a\x76\xf4\xb5\xdf\x3a\x8b\x97\x13\x3c\x72\x98\x75\x24\x3e\xa1\x0a\x25\xf7\x3b\xd0\x29\xbe\x13\xe5\x5e\x1a\xa3\x50\xe8\xf3\x88\xee\x93\xe6\x59\x34\xae\x46\xa0\x22\x70\x9f\x59\xfb\x98\xdc\xda\x87\xb1\xc2\x80\xa7\x52\xc7\xe1\x6a\x3b\x03\x5a\x67\x9e\x48\xa2\xdb\xc1\x22\xef\x6b\x74\x97\xa7\x29\x8b\x7a\xe7\x30\xbd\x31\x2e\x55\x32\x93\xd6\xf7\xa2\xeb\xe4\x08\x28\x8e\x27\x05\xed\x63\xd0\xff\xaa\x50\xff\x54\xa1\x58\x90\xa6\xdb\x8e\x87\x9f\xb4\x9a\x6b\x7a\xb9\xf3\xb7\xad\xfb\x7d\xd2\x7f\x63\xfb\xcb\x98\x78\xe4\x3c\x18\xfd\x58\x2f\xd1\x69\x64\xf4\x7d\xaa\xaa\x9a\x43\xa5\x0f\xba\x79\x47\x64\x05\x3a\xd4\xf9\x3e\x89\x04\xaa\xd9\xda
\x0d\x17\xd2\xd4\x72\xfb\xb3\x12\x5a\x94\xe8\x86\xc7\x48\x01\x0a\x42\x25\x47\x31\x43\xad\xfd\xc4\xc4\x07\xfe\x1d\x7a\x6b\xb4\xc7\x45\x48\xcc\x51\xa2\x5a\x3c\x78\x22\x51\x8f\x7b\xda\xa7\xa4\x92\x95\xf6\xdb\x72\x46\xa7\x95\xe1\xeb\x90\xe2\xaa\x56\x4c\x56\xe1\x17\x8c\x11\x54\xe8\xfc\xc0\xc8\xe8\x53\xf2\x49\xef\x31\x4e\x78\x90\x8c\xa3\xbf\xe2\x0b\x7c\xcc\x31\xb0\x08\x5f\x53\xb9\xd0\x60\x8d\xf7\xb4\x54\x1b\x58\x62\xfc\x40\xaa\x96\xa4\xd3\xb3\x6d\x8a\x74\xcf\x89\x3d\xbc\xc2\x41\x39\x18\xc1\x67\x92\xbf\xfd\xce\xe6\x23\xea\x5e\xcb\x66\xdc\x7a\x7e\x3d\xb8\xde\xdb\x3d\xf5\x18\x84\x15\x48\xaf\xbd\x73\x9e\x21\x4e\x24\x7a\x72\xc8\x99\x07\x09\xcb\x8a\x58\xd6\x5e\x3b\x03\xd1\x6b\xf8\x94\x72\xc8\xb5\xd3\x28\xa1\x70\xa6\x82\x4c\xbe\xbd\xf6\x5f\xc9\x15\x44\x42\x93\xd6\xd0\xfe\x73\x12\x16\xc9\x45\x88\xf9\x1b\x02\xc9\x92\x85\x17\xc5\x92\xdd\xa5\x14\x7f\xcf\x08\xf8\x1b\xf1\xbf\x1c\xbd\xc8\xf3\xf0\x95\xf9\x5d\x22\xf8\x3b\x00\x00\xff\xff\xa7\x12\x50\x5d\x8f\x10\x00\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\x4b\x6f\x1b\x39\x12\xbe\xeb\x57\x14\xac\x43\x12\x40\x0f\xc3\x47\xed\x49\x2b\x3b\xbb\x8d\x35\xa4\xc0\x52\x36\x08\x30\xc0\x80\x6a\x56\xb7\x6a\xc2\x26\x39\x64\xb5\x15\x4d\xe0\xff\x3e\x20\xd9\x8a\xd4\x6a\x29\x63\x4f\x82\x41\x78\xb1\x45\xd6\xe3\xab\x07\x3f\x56\xf7\x61\x66\xec\xce\x51\xb9\x61\xb8\xb9\xbe\xb9\x81\xff\x18\x53\x2a\x84\xfb\xfb\xd9\x08\xa6\x4a\xc1\x43\x38\xf2\xf0\x80\x1e\xdd\x23\xca\x51\xaf\x0f\xbd\x3e\xdc\x53\x8e\xda\xa3\x84\x5a\x4b\x74\xc0\x1b\x84\xa9\x15\xf9\x06\xf7\x27\x03\xf8\x3f\x3a\x4f\x46\xc3\xcd\xe8\x1a\x5e\x07\x81\xab\xe6\xe8\xea\xcd\xbf\x7a\x7d\xd8\x99\x1a\x2a\xb1\x03\x6d\x18\x6a\x8f\xc0\x1b\xf2\x50\x90\x42\xc0\xcf\x39\x5a\x06\xd2\x90\x9b\xca\x2a\x12\x3a\x47\xd8\x12\x6f\xa2\x9b\xc6\x48\xc0\xf1\xb1\x31\x61\xd6\x2c\x48\x83\x80\xdc\xd8\x1d\x98\xe2\x58\x0e\x04\x27\xc4\x61\x6d\x98\xed\x64\x3c\xde\x6e\xb7\x23\x11\xd1\x8e\x8c\x2b\xc7\x2a\x49\xfa\xf1\x7d\x36\xbb\x9b\x2f\xef\x86\x37\xa3\xeb\xa4\xf3\x5e\x2b\xf4\x1e\x1c\xfe\x5e\x93\x43\x0
9\xeb\x1d\x08\x6b\x15\xe5\x62\xad\x10\x94\xd8\x82\x71\x20\x4a\x87\x28\x81\x4d\x40\xbc\x75\xc4\xa4\xcb\x01\x78\x53\xf0\x56\x38\xec\xf5\x41\x92\x67\x47\xeb\x9a\x5b\xe9\xda\xe3\x23\xdf\x12\x30\x1a\x84\x86\xab\xe9\x12\xb2\xe5\x15\xfc\x7b\xba\xcc\x96\x83\x5e\x1f\x3e\x64\xab\xff\x2e\xde\xaf\xe0\xc3\xf4\xe1\x61\x3a\x5f\x65\x77\x4b\x58\x3c\xc0\x6c\x31\xbf\xcd\x56\xd9\x62\xbe\x84\xc5\x5b\x98\xce\x3f\xc2\xff\xb2\xf9\xed\x00\x90\x78\x83\x0e\xf0\xb3\x75\x01\xbf\x71\x40\x21\x91\xa9\x7a\x4b\xc4\x16\x80\xc2\x24\x40\xde\x62\x4e\x05\xe5\xa0\x84\x2e\x6b\x51\x22\x94\xe6\x11\x9d\x26\x5d\x82\x45\x57\x91\x0f\xe5\xf4\x20\xb4\xec\xf5\x41\x51\x45\x2c\x38\xee\x74\x82\x1a\xf5\x48\x17\x66\xd2\x03\x60\x62\x85\x13\xc8\x24\x6a\x26\xde\xad\x8c\x51\x9f\x88\xc7\x8b\x69\xcd\x9b\x4c\xda\x99\xd1\x05\x95\x3d\x00\x89\x3e\x77\x64\x83\xbd\x09\xac\x36\x78\xaa\x01\x6d\x0d\x70\xe8\x4d\xed\x72\xec\x01\x7c\x1e\xca\x5c\x0d\x3d\xbb\x3a\xe7\xa1\x16\x15\x4e\xa0\x63\x3e\xc9\x6c\x84\x1f\x92\xa8\x26\x50\x08\xe5\xb1\x67\x05\x6f\x7c\x00\x59\x22\x87\x3f\x67\x50\x14\xb5\xce\xc3\xaf\xd0\xa1\xb1\xc4\x25\x86\xc6\x2c\x8c\xab\x62\xec\x20\xd6\xa6\x66\x10\x5d\x8f\x00\x56\x38\x51\x21\xa3\xf3\xc9\xf8\x10\x2e\x80\x0b\x6b\xdf\x62\x13\x60\x57\x63\xb3\xd9\x82\x33\x85\xa2\x56\x0a\x48\x7b\x8e\x17\xc2\x14\xe7\xdc\x86\xf6\xdc\x3d\x37\x98\x28\xfc\x53\x87\x23\x51\x21\xe3\x73\xe3\x49\xd2\x3f\x1d\xfc\xa9\x52\x2f\x8d\x40\xa9\x97\xc4\x60\x9d\xf9\x0d\x73\xfe\x16\x78\x9f\x6f\xb0\x12\x93\xe6\x17\x00\xef\x2c\x4e\x20\xd0\x8e\x0e\xb6\x15\xf9\x67\x5f\x81\x20\x7b\xa6\x69\x2a\xa1\x77\xff\x24\xe8\xf0\x36\x18\x8d\x9a\xa3\xdd\x24\xda\xb8\x68\xa3\xd8\xeb\x37\x54\x74\xb6\xe0\x89\x1f\x48\x7e\x85\xe5\xc7\x5f\xbe\x34\xff\x3e\x3d\x8d\x8d\x38\xd6\x09\x67\x21\x86\xa7\xa7\x96\xb6\x15\x0e\x35\x0f\x73\xa3\xc3\x6b\x84\xee\x34\xc4\x03\x07\xe5\x0e\x05\x63\x2b\xd4\xb3\x04\x75\x7c\xe4\x50\xc8\x21\x53\x85\xa6\xe6\x09\x5c\xb7\xce\xe2\x3d\xbe\x74\x98\x5a\xaa\x7b\x9a\x92\x69\xd6\xe7\x8a\xd0\xfc\x1e\x9e\x44\x60\x9d\xb1\xe8\x98\xd0\x1f\x6a\x92\x2b\x42\xcd\x99\x3c\xec\x74\x9a\x6
b\xbf\x12\x9e\xd2\x34\x24\x3d\x6b\x34\x8f\x24\x3a\xcd\x97\xac\x03\xc9\x78\xc1\x74\x2a\x5e\xb3\x3b\x3a\xc1\xb0\xc4\xdc\xed\x89\xfc\xe5\x38\x92\xf6\x33\xb0\xf8\x28\xb8\x9f\x32\x8e\x01\x0d\xc2\xed\x40\x1d\x67\x83\x45\x76\x3b\x83\xdc\x48\x3c\xb2\x08\x50\x28\xb3\x1d\x75\x90\x78\xd4\x9e\x98\x1e\xdb\x1d\x01\x61\x2c\xb0\x4a\xec\xe6\x01\xe5\x8b\xa3\xba\x3d\x28\x7f\x33\xa8\xd8\xd1\xaf\xfc\xde\x59\xbc\x9c\xe0\x91\xc3\xac\x23\xf1\x11\x55\x28\xb9\x3f\x80\x4e\xf1\x9d\x29\xf7\xda\x18\x85\x42\x5f\x46\x74\x97\x34\x2f\xa2\x71\x35\x02\x15\x81\xfb\xcc\xd6\xc7\xe4\xd6\x3e\x8c\x15\x06\x3c\x95\x3a\x0e\x57\xfb\x19\xd0\x3a\xf3\x48\x12\xdd\x01\x16\x79\x5f\xa3\x7b\x79\x9a\xb2\xa8\x77\x09\xd3\x5b\xe3\x52\x25\x33\x69\xfd\x20\xba\x4e\x8e\x80\xe2\x78\x52\xd0\x31\x06\xfd\xb7\x0a\xf5\x57\x15\x8a\x05\x69\xba\xad\x3b\xfc\xa4\xd5\x5c\xd3\x97\x3b\x7f\xd7\xba\xdf\x67\xfd\x37\xb6\xbf\x8e\x89\x1d\xe7\xc1\xe8\xa7\x7a\x8d\x4e\x23\xa3\x1f\x52\x55\xd5\x1c\x2a\x7d\xd2\xcd\x07\x22\x2b\xd0\xa1\xce\x8f\x49\x24\x50\xcd\xde\x6e\xb8\x90\xa6\x96\xfb\x9f\x95\xd0\xa2\x44\x37\xee\x22\x05\x28\x08\x95\x9c\xc4\x0c\xb5\xf6\x13\x13\x9f\xf8\x77\xe8\xad\xd1\x1e\x57\x21\x31\x9d\x44\xb5\x78\xf0\x4c\xa2\x1e\x8e\xb4\xcf\x49\x25\x2b\xed\xb7\xe5\x82\x4e\x27\xc3\x55\xad\x98\xac\xc2\xaf\x10\x23\xa6\xd0\xf8\x81\x90\xd1\xa7\xdc\x93\x3e\x22\x9c\xf0\x1e\x19\x47\x7f\xc4\x07\xb8\x4b\x31\xb0\x0a\x1f\x53\xb9\xd0\x60\x8d\xf7\xb4\x56\x3b\x58\x63\xfc\x3e\xaa\xd6\xa4\xd3\xab\x6d\x8a\x74\xcd\x89\x3d\xbc\xc6\x51\x39\x82\x2f\x24\x7f\xf9\x95\xcd\x27\xd4\x83\x96\xc9\xb8\xf5\xf4\xe6\x98\xb8\xce\xbd\x04\x61\x05\xc6\x6b\xef\x5c\xa6\x87\x33\x59\x9e\x9d\x12\xe6\x49\xb6\xb2\x22\xd6\x74\xd0\x8e\x3f\x7a\x0d\xdf\x51\x0e\xb9\x76\x1a\x25\x14\xce\x54\x90\xc9\x77\xaf\xfc\x37\x32\x05\x91\xcd\xa4\x35\x74\xfc\x96\x84\x45\x72\x15\x42\xfe\x8e\x40\xb2\x64\xe1\x59\xb1\x64\xb7\x29\xc3\x3f\x32\x02\xfe\x4e\xfc\xcf\x47\x2f\xf2\x3c\x7c\x62\xfe\x90\x08\xfe\x0c\x00\x00\xff\xff\x52\x6e\xfb\x1f\x8c\x10\x00\x00"), }, "/identitytoolkit/tenant.yaml": &vfsgen۰CompressedFileInfo{ 
name: "tenant.yaml", @@ -523,9 +523,9 @@ var Assets = func() http.FileSystem { "/identitytoolkit/tenant_oauth_idp_config.yaml": &vfsgen۰CompressedFileInfo{ name: "tenant_oauth_idp_config.yaml", modTime: time.Time{}, - uncompressedSize: 4808, + uncompressedSize: 4805, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x5d\x6f\xdb\x36\x17\xbe\xf7\xaf\x38\x48\x2e\xd2\x02\xfe\x08\x72\xe9\xf7\xca\xaf\x93\x6e\xc2\x82\xb8\x88\xdd\x15\x05\x06\x0c\xb4\x74\x24\x9f\x95\x22\x39\xf2\x28\xae\x57\xf8\xbf\x0f\x24\xe5\x58\xb2\xe5\x2c\x5e\x8b\x01\xd5\x4d\x2c\xf2\x7c\x3c\xe7\x83\x0f\x8f\x72\x09\x53\x6d\x36\x96\x8a\x15\xc3\xcd\xf5\xcd\x0d\xfc\xa4\x75\x21\x11\xee\xef\xa7\x43\x98\x48\x09\x8f\x7e\xcb\xc1\x23\x3a\xb4\x4f\x98\x0d\x7b\x97\xd0\xbb\x84\x7b\x4a\x51\x39\xcc\xa0\x52\x19\x5a\xe0\x15\xc2\xc4\x88\x74\x85\xbb\x9d\x3e\xfc\x8a\xd6\x91\x56\x70\x33\xbc\x86\x37\x5e\xe0\xa2\xde\xba\x78\xfb\xbf\xde\x25\x6c\x74\x05\xa5\xd8\x80\xd2\x0c\x95\x43\xe0\x15\x39\xc8\x49\x22\xe0\x97\x14\x0d\x03\x29\x48\x75\x69\x24\x09\x95\x22\xac\x89\x57\xc1\x4d\x6d\xc4\xe3\xf8\x54\x9b\xd0\x4b\x16\xa4\x40\x40\xaa\xcd\x06\x74\xde\x94\x03\xc1\x11\xb1\x7f\x56\xcc\x66\x3c\x1a\xad\xd7\xeb\xa1\x08\x68\x87\xda\x16\x23\x19\x25\xdd\xe8\x3e\x99\xde\x3d\xcc\xef\x06\x37\xc3\xeb\xa8\xf3\x41\x49\x74\x0e\x2c\xfe\x59\x91\xc5\x0c\x96\x1b\x10\xc6\x48\x4a\xc5\x52\x22\x48\xb1\x06\x6d\x41\x14\x16\x31\x03\xd6\x1e\xf1\xda\x12\x93\x2a\xfa\xe0\x74\xce\x6b\x61\xb1\x77\x09\x19\x39\xb6\xb4\xac\xb8\x95\xae\x1d\x3e\x72\x2d\x01\xad\x40\x28\xb8\x98\xcc\x21\x99\x5f\xc0\xff\x27\xf3\x64\xde\xef\x5d\xc2\xc7\x64\xf1\xf3\xec\xc3\x02\x3e\x4e\x1e\x1f\x27\x0f\x8b\xe4\x6e\x0e\xb3\x47\x98\xce\x1e\x6e\x93\x45\x32\x7b\x98\xc3\xec\x1d\x4c\x1e\x3e\xc1\x2f\xc9\xc3\x6d\x1f\x90\x78\x85\x16\xf0\x8b\xb1\x1e\xbf\xb6\x40\x3e\x91\xb1\x7a\x73\xc4\x16\x80\x5c\x47\x40\xce\x60\x4a\x39\xa5\x20\x85\x2a\x2a\x51\x20\x14\xfa\x09\xad\x22\x55\x80\x41\x5b\x92\xf3\xe5\x74\x20\x54\xd6\xbb\x04\x49\x25\xb1\xe0\xb0\x72\x14\xd4\xb0\x47\x2a\xd7\xe3\x1e\x00\x13\x4
b\x1c\x43\x92\xa1\x62\xe2\xcd\x42\x6b\xf9\x99\x78\xb4\x40\x25\x14\xcf\x26\x15\xaf\x92\xcc\x4c\xb5\xca\xa9\xe8\x01\x64\xe8\x52\x4b\xc6\x5b\x1d\xc3\x62\x85\x87\x7a\xd0\xa5\x07\x16\x9d\xae\x6c\x8a\x3d\x80\x2f\x83\x2c\x95\x03\xc7\xb6\x4a\x79\xa0\x44\x89\xe3\x4e\x95\x67\xc9\x95\x70\x03\x12\xe5\x18\x72\x21\x1d\xf6\x8c\xe0\x95\xf3\xb0\x0b\x64\xff\xa7\x03\x51\x5e\xa9\xd4\xbf\xf9\x9e\x0d\x45\x2f\xd0\xb7\x6a\xae\x6d\x19\xb2\x01\x62\xa9\x2b\x06\x71\xca\x2f\x80\x11\x56\x94\xc8\x68\x5d\x74\x31\x80\x17\x81\xfa\x67\xd7\x80\x63\x60\x5b\x61\xbd\xd8\x82\x36\x81\xbc\x92\x12\x48\x39\x0e\xc7\x45\xe7\xa7\x21\xf8\x16\xde\xbc\x36\xbc\x20\xfc\x83\x05\x98\xa1\x44\xc6\xd7\x46\x18\xa5\x7f\x80\x80\x26\x52\x9e\x1b\x93\x94\xe7\x47\x65\xac\xfe\x03\x53\x7e\x29\x10\x97\xae\xb0\x14\xe3\xfa\x0d\x80\x37\x06\xc7\xe0\x49\x4c\x15\x2d\x5b\x1c\x9c\x7f\x83\x29\x49\xee\xd5\x27\xd1\xcb\x76\x74\x6a\x29\xd4\xe6\x87\xce\x82\xbf\x05\xb5\x42\xc5\x01\x62\x14\xad\xd1\x76\x85\xb5\xb3\x52\x53\xef\x0b\x4d\x1a\x39\x90\xb2\xe7\x68\xdd\xe8\xeb\xd7\xfa\xe7\x76\x3b\x8a\xb0\xfd\x5a\xfc\xb5\xdd\x8e\xb4\x68\x9a\xf1\x5b\x3e\xc2\xed\xb6\x65\xd0\x08\x8b\x8a\x07\xa9\x56\xfe\x5a\x46\x7b\x98\xcc\x3d\xf5\xa6\x16\x05\x63\x2b\x13\x9d\xbc\xdc\xdc\xb2\x28\xb2\x01\x53\x89\xba\xe2\x31\x5c\xb7\xf6\x02\x59\x9d\xda\x8c\x67\xe2\x78\x37\xe6\x5a\x2f\xbb\xca\x5d\xbf\x0f\x0e\x22\x18\xb4\x4b\x6a\xac\x36\x68\x99\xd0\xed\x2b\x98\x4a\x42\xc5\x49\xb6\x5f\xe9\x68\x90\x26\xbc\x42\xd7\x17\xd6\xb4\xd6\x6c\x48\x1c\xf5\x7e\xb4\x0e\x94\x05\xda\x50\x10\xca\x5b\xaf\x0e\x0f\x30\xcc\x31\xb5\xbb\xeb\xec\x7c\x1c\x51\xfb\x15\x58\x5c\x10\xdc\x4d\x5f\x4d\x40\x7d\x7f\x38\x51\x85\x99\x69\x96\xdc\x4e\x21\xd5\x19\x36\x2c\x02\xe4\x52\xaf\x87\x47\x48\x1c\x2a\x47\x4c\x4f\xed\x06\x01\x3f\x2e\x19\x29\x36\x0f\x1e\xe5\xd9\x51\xdd\xee\x95\x5f\x0c\x2a\x34\xf8\x95\xdb\x39\x0b\x27\x19\x1c\xb2\x9f\x01\x33\x7c\x42\xe9\x4b\xee\xf6\xa0\x63\x7c\x1d\xe5\x5e\x6a\x2d\x51\xa8\xd3\x88\xee\xa2\xe6\x49\x34\xb6\x42\xa0\xdc\x73\xb9\x5e\xbb\x90\xdc\xca\xf9\x71\x4b\x83\xa3\x42\x85\xa1\x73\x37\x1b\x1
b\xab\x9f\x28\x43\xbb\x87\x45\xce\x55\x68\xcf\x4f\x53\x12\xf4\x4e\x61\x7a\xa7\x6d\xac\x64\x92\x19\xd7\x0f\xae\xa3\x23\xa0\x30\xb0\xe5\xd4\xc4\xa0\xfe\x55\xa1\xfe\xa9\x42\xa1\x20\x75\xb7\x1d\x0f\x82\xf1\xa9\x4f\xed\xf9\xce\xdf\xb7\x8e\x7b\xa7\xff\xda\xf6\xf3\xf8\x7c\xe4\xdc\x1b\xfd\x5c\x2d\xd1\x2a\x64\x74\x03\x2a\xcb\x8a\x7d\xa5\x0f\xba\x79\xcf\x6b\x39\x5a\x54\x69\x93\x44\x3c\xd3\xec\xec\xfa\x03\xa9\xab\x6c\xf7\x5a\x0a\x25\x0a\xb4\xa3\x63\xa4\x00\x39\xa1\xcc\xc6\x21\x43\xad\xf5\x48\xcc\x07\xfe\x2d\x3a\xa3\x95\xc3\x85\x4f\xcc\x51\xa2\x5a\xb4\xd8\x91\xa8\xc7\x86\x76\x97\x54\xb4\xd2\x75\x07\x9d\xd0\x6c\xe5\xf9\xca\x27\xba\xac\x24\x93\x91\xf8\x8c\x34\x40\xf3\xfd\xef\x69\x1a\x5d\x2c\x01\xa9\x06\xef\xf8\x5b\x4a\x5b\xfa\x2b\x8c\x01\xc7\x4c\x03\x0b\xff\xad\x99\x0a\x05\x46\x3b\x47\x4b\xb9\x81\x25\x86\xcf\xc7\x72\x49\x2a\xce\x0e\x3a\x8f\xa7\x9d\xd8\xc1\x1b\x1c\x16\xc3\x31\x7c\xa5\xec\xb7\xdf\x59\x7f\x46\xd5\x6f\xd9\x0c\x4b\xdb\xb7\xc3\xab\xc6\x6a\xd7\x95\xe0\x1f\x4f\x7d\xed\x95\xd3\x3c\xd1\x91\xee\xe9\x21\x73\x1e\x24\x2c\xc9\x43\x71\xfb\xed\x0c\x04\xaf\xfe\x43\xd3\x22\x57\x56\x61\x06\xb9\xd5\x25\x24\xd9\xfb\x2b\xf7\x42\xae\x20\xd0\x5a\x66\x34\x35\x2f\x15\xff\x50\xb6\xf0\x31\x7f\x43\x20\x49\xb4\xf0\xaa\x58\x92\xdb\x98\xe2\xef\x19\x01\x7f\x23\xfe\xd7\xa3\x17\x69\xea\xbf\xc1\xbf\x6b\x04\x71\xfa\x38\x9f\xd6\x16\xcd\xa9\xe5\x08\xb2\x3f\x6c\xd1\xf2\x7f\x48\x6a\xbb\x2f\x7c\x6e\xfd\x67\xe0\x7c\x3a\xfb\x3b\x00\x00\xff\xff\xc7\x99\xc3\x8c\xc8\x12\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x5b\x6f\xdb\xb8\x12\x7e\xf7\xaf\x18\x24\x0f\x6d\x01\x5f\x82\x3c\xfa\x3c\xf9\x38\xe9\x39\xc2\x09\xec\x22\x76\x4f\x51\x60\x81\x05\x2d\x8d\xe4\xd9\x52\x24\x97\x1c\xc5\xf5\x16\xfe\xef\x0b\x92\x72\x2c\xd9\x72\x36\xde\x16\x0b\x54\x2f\xb1\xc8\xb9\x7c\x73\xe1\xc7\x51\xae\x61\xaa\xcd\xd6\x52\xb1\x66\xb8\xbd\xb9\xbd\x85\xff\x68\x5d\x48\x84\x87\x87\xe9\x10\x26\x52\xc2\xa3\xdf\x72\xf0\x88\x0e\xed\x13\x66\xc3\xde\x35\xf4\xae\xe1\x81\x52\x54\x0e\x33\xa8\x54\x86\x16\x78\x8d\x30\x31\x22\x5d\xe3\x7e\xa7\x0f\xff\x47\xeb\x48\x2b\xb8\x1d\xde\xc0\x5b\x2f\x70\x55\x6f\x5d\xbd\xfb\x57\xef\x1a\xb6\xba\x82\x52\x6c\x41\x69\x86\xca\x21\xf0\x9a\x1c\xe4\x24\x11\xf0\x6b\x8a\x86\x81\x14\xa4\xba\x34\x92\x84\x4a\x11\x36\xc4\xeb\xe0\xa6\x36\xe2\x71\x7c\xae\x4d\xe8\x15\x0b\x52\x20\x20\xd5\x66\x0b\x3a\x6f\xca\x81\xe0\x88\xd8\x3f\x6b\x66\x33\x1e\x8d\x36\x9b\xcd\x50\x04\xb4\x43\x6d\x8b\x91\x8c\x92\x6e\xf4\x90\x4c\xef\x67\x8b\xfb\xc1\xed\xf0\x26\xea\x7c\x54\x12\x9d\x03\x8b\xbf\x57\x64\x31\x83\xd5\x16\x84\x31\x92\x52\xb1\x92\x08\x52\x6c\x40\x5b\x10\x85\x45\xcc\x80\xb5\x47\xbc\xb1\xc4\xa4\x8a\x3e\x38\x9d\xf3\x46\x58\xec\x5d\x43\x46\x8e\x2d\xad\x2a\x6e\xa5\x6b\x8f\x8f\x5c\x4b\x40\x2b\x10\x0a\xae\x26\x0b\x48\x16\x57\xf0\xef\xc9\x22\x59\xf4\x7b\xd7\xf0\x29\x59\xfe\x77\xfe\x71\x09\x9f\x26\x8f\x8f\x93\xd9\x32\xb9\x5f\xc0\xfc\x11\xa6\xf3\xd9\x5d\xb2\x4c\xe6\xb3\x05\xcc\xdf\xc3\x64\xf6\x19\xfe\x97\xcc\xee\xfa\x80\xc4\x6b\xb4\x80\x5f\x8d\xf5\xf8\xb5\x05\xf2\x89\x8c\xd5\x5b\x20\xb6\x00\xe4\x3a\x02\x72\x06\x53\xca\x29\x05\x29\x54\x51\x89\x02\xa1\xd0\x4f\x68\x15\xa9\x02\x0c\xda\x92\x9c\x2f\xa7\x03\xa1\xb2\xde\x35\x48\x2a\x89\x05\x87\x95\x93\xa0\x86\x3d\x52\xb9\x1e\xf7\x00\x98\x58\xe2\x18\x92\x0c\x15\x13\x6f\x97\x5a\xcb\x2f\xc4\xa3\x25\x2a\xa1\x78\x3e\xa9\x78\x9d\x64\x66\xaa\x55\x4e\x45\x0f\x20\x43\x97\x5a\x32\xde\xea\x18\x96\x6b\x3c\xd6\x83\x2e\x3d\xb0\xe8\x74\x65\x53\xec\x01\x7c\x1d\x64\xa9\x1c\x38\xb6\x55\xca\x03\x25\x4a\x1c\x77\xaa\x3c\x4b\xae\x85\x1b\x90\x28
\xc7\x90\x0b\xe9\xb0\x67\x04\xaf\x9d\x87\x5d\x20\xfb\x3f\x1d\x88\xf2\x4a\xa5\xfe\xcd\xf7\x6c\x28\x7a\x81\xbe\x55\x73\x6d\xcb\x90\x0d\x10\x2b\x5d\x31\x88\x73\x7e\x01\x8c\xb0\xa2\x44\x46\xeb\xa2\x8b\x01\xbc\x08\xd4\x3f\xfb\x06\x1c\x03\xdb\x0a\xeb\xc5\x16\xb4\x09\xe4\x95\x94\x40\xca\x71\x38\x2e\x3a\x3f\x0f\xc1\xb7\xf0\xf6\xb5\xe1\x05\xe1\x9f\x2c\xc0\x0c\x25\x32\xbe\x36\xc2\x28\xfd\x13\x04\x34\x91\xf2\xd2\x98\xa4\xbc\x3c\x2a\x63\xf5\x6f\x98\xf2\x4b\x81\xb8\x74\x8d\xa5\x18\xd7\x6f\x00\xbc\x35\x38\x06\x4f\x62\xaa\x68\xd9\xe2\xe0\xfc\x3b\x4c\x49\x72\xaf\x3e\x89\x5e\xb6\xa3\x53\x4b\xa1\xb6\x3f\x75\x16\xfc\x2d\xa8\x15\x2a\x0e\x10\xa3\x68\x8d\xb6\x2b\xac\xbd\x95\x9a\x7a\x5f\x68\xd2\xc8\x81\x94\x3d\x47\xeb\x46\xdf\xbe\xd5\x3f\x77\xbb\x51\x84\xed\xd7\xe2\xaf\xdd\x6e\xa4\x45\xd3\x8c\xdf\xf2\x11\xee\x76\x2d\x83\x46\x58\x54\x3c\x48\xb5\xf2\xd7\x32\xda\xe3\x64\x1e\xa8\x37\xb5\x28\x18\x5b\x99\xe8\xe4\xe5\xe6\x96\x45\x91\x0d\x98\x4a\xd4\x15\x8f\xe1\xa6\xb5\x17\xc8\xea\xdc\x66\x3c\x13\xa7\xbb\x31\xd7\x7a\xd5\x55\xee\xfa\x7d\x70\x14\xc1\xa0\x5d\x52\x63\xb5\x41\xcb\x84\xee\x50\xc1\x54\x12\x2a\x4e\xb2\xc3\x4a\x47\x83\x34\xe1\x15\xba\xbe\xb0\xa6\xb5\x66\x43\xe2\xa4\xf7\xa3\x75\xa0\x2c\xd0\x86\x82\x50\xde\x7a\x75\x78\x84\x61\x81\xa9\xdd\x5f\x67\x97\xe3\x88\xda\xaf\xc0\xe2\x82\xe0\x7e\xfa\x6a\x02\xea\xfb\xc3\x89\x2a\xcc\x4c\xf3\xe4\x6e\x0a\xa9\xce\xb0\x61\x11\x20\x97\x7a\x33\x3c\x41\xe2\x50\x39\x62\x7a\x6a\x37\x08\xf8\x71\xc9\x48\xb1\x9d\x79\x94\x17\x47\x75\x77\x50\x7e\x31\xa8\xd0\xe0\x6f\xdc\xde\x59\x38\xc9\xe0\x90\xfd\x0c\x98\xe1\x13\x4a\x5f\x72\x77\x00\x1d\xe3\xeb\x28\xf7\x4a\x6b\x89\x42\x9d\x47\x74\x1f\x35\xcf\xa2\xb1\x15\x02\xe5\x9e\xcb\xf5\xc6\x85\xe4\x56\xce\x8f\x5b\x1a\x1c\x15\x2a\x0c\x9d\xfb\xd9\xd8\x58\xfd\x44\x19\xda\x03\x2c\x72\xae\x42\x7b\x79\x9a\x92\xa0\x77\x0e\xd3\x7b\x6d\x63\x25\x93\xcc\xb8\x7e\x70\x1d\x1d\x01\x85\x81\x2d\xa7\x26\x06\xf5\xb7\x0a\xf5\x57\x15\x0a\x05\xa9\xbb\xed\x74\x10\x8c\x4f\x7d\x6a\x2f\x77\xfe\xa1\x75\xdc\x3b\xfd\xd7\xb6\x9f\xc7\xe7\x13\xe7\xde\xe8\x97\x6a
\x85\x56\x21\xa3\x1b\x50\x59\x56\xec\x2b\x7d\xd4\xcd\x07\x5e\xcb\xd1\xa2\x4a\x9b\x24\xe2\x99\x66\x6f\xd7\x1f\x48\x5d\x65\xfb\xd7\x52\x28\x51\xa0\x1d\x9d\x22\x05\xc8\x09\x65\x36\x0e\x19\x6a\xad\x47\x62\x3e\xf2\x6f\xd1\x19\xad\x1c\x2e\x7d\x62\x4e\x12\xd5\xa2\xc5\x8e\x44\x3d\x36\xb4\xbb\xa4\xa2\x95\xae\x3b\xe8\x8c\xe6\x49\x9e\xcb\x4a\x32\x19\x89\xcf\x40\x03\x32\xdf\xfe\x9e\xa5\xd1\xc5\x0a\x90\x6a\xd0\x8e\xbf\xa4\xb4\xa5\x3f\xc2\x14\x70\x4a\x34\xb0\xf4\x9f\x9a\xa9\x50\x60\xb4\x73\xb4\x92\x5b\x58\x61\xf8\x7a\x2c\x57\xa4\xe2\xe8\xa0\xf3\x78\xd8\x89\x1d\xbc\xc5\x61\x31\x84\x6f\x94\xfd\xf2\x2b\xeb\x2f\xa8\xfa\x2d\x93\x61\x69\xf7\xae\x49\x5f\x5d\xf7\x81\x7f\x3c\xef\xb5\x57\xce\x93\x44\x47\xae\xa7\xc7\xb4\x79\x94\xad\x24\x0f\x95\xed\xb7\xe3\x0f\x5e\xfd\x57\xa6\x45\xae\xac\xc2\x0c\x72\xab\x4b\x48\xb2\x0f\x6f\xdc\x0b\x99\x82\xc0\x69\x99\xd1\xd4\xbc\x51\xfc\x43\xd9\xd2\x87\xfc\x1d\x81\x24\xd1\xc2\xab\x62\x49\xee\x62\x86\x7f\x64\x04\xfc\x9d\xf8\x5f\x8f\x5e\xa4\xa9\xff\x00\xff\xa1\x11\xc4\xd1\xe3\x72\x4e\x5b\x36\x47\x96\x13\xc8\xfe\xa8\x45\xcb\xff\x20\xa3\xed\x3f\xef\xb9\xf5\x6f\x81\xcb\xb9\xec\xcf\x00\x00\x00\xff\xff\x58\x1a\xac\x28\xc5\x12\x00\x00"), }, "/logging": &vfsgen۰DirInfo{ name: "logging", @@ -548,9 +548,9 @@ var Assets = func() http.FileSystem { "/logging/log_metric.yaml": &vfsgen۰CompressedFileInfo{ name: "log_metric.yaml", modTime: time.Time{}, - uncompressedSize: 22658, + uncompressedSize: 22657, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x3c\x6b\x73\x1b\x37\x92\xdf\xfd\x2b\xfa\x98\x4a\x59\xf2\xf2\x25\x39\xde\xdb\x55\xca\x57\x45\x4b\x8c\xa3\x8a\x24\xeb\xf4\x48\x36\x9b\xca\x6a\xc0\x19\x90\x44\x34\x03\x4c\x00\x8c\x28\xc6\xeb\xff\x7e\xd5\x0d\xcc\x7b\x48\xd1\x8f\xdd\xbb\x3a\x7d\xb1\x39\x03\x34\xba\x1b\x8d\x7e\x63\xbe\x82\x63\x95\xae\xb5\x58\x2c\x2d\x1c\x8e\x0f\x0f\xe1\xad\x52\x8b\x98\xc3\xd9\xd9\xf1\x10\x26\x71\x0c\x57\xf8\xca\xc0\x15\x37\x5c\x3f\xf0\x68\xf8\xec\x2b\x78\xf6\x15\x9c\x89\x90\x4b\xc3\x23\xc8\x64\xc4\x35\xd8\x25\x87\x49\xca\xc2\x25\xcf\xdf\xf4\xe1\x47\xae\x8d\x50\x12\x0e\x87\x63\xd8\xc3\x01\x3d\xff\xaa\xb7\xff\xed\xb3\xaf\x60\xad\x32\x48\xd8\x1a\xa4\xb2\x90\x19\x0e\x76\x29\x0c\xcc\x45\xcc\x81\x3f\x86\x3c\xb5\x20\x24\x84\x2a\x49\x63\xc1\x64\xc8\x61\x25\xec\x92\x96\xf1\x40\x10\x8f\x9f\x3d\x08\x35\xb3\x4c\x48\x60\x10\xaa\x74\x0d\x6a\x5e\x1d\x07\xcc\x3a\x8c\xf1\x6f\x69\x6d\x7a\x34\x1a\xad\x56\xab\x21\x23\x6c\x87\x4a\x2f\x46\xb1\x1b\x69\x46\x67\xa7\xc7\xd3\x8b\xeb\xe9\xe0\x70\x38\x76\x73\x6e\x65\xcc\x8d\x01\xcd\x7f\xcf\x84\xe6\x11\xcc\xd6\xc0\xd2\x34\x16\x21\x9b\xc5\x1c\x62\xb6\x02\xa5\x81\x2d\x34\xe7\x11\x58\x85\x18\xaf\xb4\xb0\x42\x2e\xfa\x60\xd4\xdc\xae\x98\xe6\xcf\xbe\x82\x48\x18\xab\xc5\x2c\xb3\x35\x76\xe5\xf8\x09\x53\x1b\xa0\x24\x30\x09\xbd\xc9\x35\x9c\x5e\xf7\xe0\xcd\xe4\xfa\xf4\xba\xff\xec\x2b\xf8\xe9\xf4\xe6\xfb\x77\xb7\x37\xf0\xd3\xe4\xea\x6a\x72\x71\x73\x3a\xbd\x86\x77\x57\x70\xfc\xee\xe2\xe4\xf4\xe6\xf4\xdd\xc5\x35\xbc\xfb\x0e\x26\x17\x3f\xc3\x0f\xa7\x17\x27\x7d\xe0\xc2\x2e\xb9\x06\xfe\x98\x6a\xc4\x5f\x69\x10\xc8\x48\xb7\x7b\xd7\x9c\xd7\x10\x98\x2b\x87\x90\x49\x79\x28\xe6\x22\x84\x98\xc9\x45\xc6\x16\x1c\x16\xea\x81\x6b\x29\xe4\x02\x52\xae\x13\x61\x70\x3b\x0d\x30\x19\x3d\xfb\x0a\x62\x91\x08\xcb\x2c\x3d\x69\x11\x35\x7c\x26\xe4\x5c\x1d\x3d\x03\xb0\xc2\xc6\xfc\x08\xce\xd4\x62\x21\xe4\x62\x74\xa6\x16\xe7\xdc\x6a\x11\x3e\x03\x88\xb8\x09\xb5\x48\x11\xc4\x11\xdc\xe0\x64\x37\x08\x8a\x41\xa0\xb9\x51\x99\x0e\xf9\x33\x80\xc7\x41\x14\xc6\x03\x63
\x75\x16\xda\x81\x64\x89\x03\x5a\x00\x73\xaf\x97\xcc\x0c\x04\x4b\x8e\x60\xce\x62\xc3\x9f\xa5\xcc\x2e\x0d\x62\xb1\xe0\x16\xff\xe9\x58\x73\x9e\xc9\x10\x7f\xa1\x08\xd2\x1e\x2e\x38\x4a\xde\x5c\xe9\x84\x88\x03\x36\x53\x99\x05\x56\x5b\x0c\x20\x65\x9a\x25\xdc\x72\x6d\x1c\xdc\x01\xb4\x51\xc2\xbf\x5c\x72\x8e\xc0\xea\x8c\xfb\x87\x35\x24\x26\x30\xcf\xe2\x18\x84\x34\x96\xe4\x5c\xcd\x1b\x8b\xa1\xc0\xad\x77\xc5\x9e\x06\xff\xdf\xc2\x3f\xe2\x31\xb7\x7c\x57\x02\xdc\xe8\xff\x7d\x7c\x27\x71\xfc\xb1\x28\xc7\xf1\x8e\x48\xa7\x5a\xfd\xc6\x43\xbb\x0d\x65\x13\x2e\x79\xc2\x8e\xfc\x2f\x00\xbb\x4e\xf9\x11\xa0\x96\x90\x8b\x67\x00\xb1\x30\x3b\x0b\x34\x8e\xed\x90\x88\x84\xc9\xf5\xbf\x09\x5f\xd4\xe2\x4a\x72\x69\x09\xae\x1b\xea\x97\x28\x10\xc8\xa7\x96\xfa\xa2\xb6\xaf\xee\x78\x8b\xa8\x40\xc6\x8c\xde\xbf\xf7\xff\xfd\xf0\x61\x94\xd0\x58\x7c\x86\x18\x7f\xf8\x50\x9b\x95\x19\x6e\x06\xc6\x32\xcb\x07\x4b\x21\x6d\x0d\x6f\x37\x22\x65\x9a\x4b\x3b\x08\x95\x44\x3b\xc2\x75\x93\xe4\x52\xb9\x84\x9a\x33\xcb\x3b\x40\x34\x34\x4f\xf5\x95\xe6\x2c\x1a\x58\x91\x70\x95\xd9\x23\x18\xd7\xde\xd1\x79\xdd\xf4\xd2\x09\x56\xfb\xad\x63\xae\x9a\x75\x6d\x8a\xff\xed\xb6\xae\xf8\x31\x17\xb1\xe5\xba\xf8\x59\xa7\x2e\xd5\x2a\xe5\xda\x0a\x6e\xca\xfd\x9b\x65\xe1\x3d\xb7\xef\x48\xae\x2a\x8f\x3b\xd7\x2e\x11\x5e\x28\xaf\x99\xdf\x54\xa7\x77\x0d\x73\x60\x8a\x5d\xde\x34\xbe\x26\xdd\xee\x35\x8b\x87\x24\xe7\x81\x43\xf1\x4e\xb9\x49\x01\x30\xcd\x4b\x43\xbd\x5a\x72\x49\x06\x29\x56\x0b\x33\x98\x31\xc3\xa3\x0a\x58\x00\x27\x30\x68\x7b\x33\x83\x26\x87\xc1\xc9\xe9\xf5\xcd\xd5\xe9\x9b\x5b\xb4\xa6\xf0\xc0\xe2\x8c\x13\xad\x68\xed\x40\x58\x8f\xc9\x8c\x1b\x82\xea\xd6\xae\x41\x9c\xa9\x4c\x46\x4c\x0b\x6e\x8a\x93\xe7\x84\x05\x18\x2c\x85\xb1\x6a\xa1\x59\x92\xfb\x26\xfc\xd1\x6a\x16\xa2\xb9\xa7\x95\xcc\xb0\x02\xaa\x6b\x3b\xf0\x8f\x3f\xa2\xe3\x21\xac\xe3\x55\xe3\xe5\xc6\x8d\xe9\xd8\x9c\x69\x1d\xd0\xa6\xc1\xdb\xb6\x68\x3b\x88\x96\x4e\xca\x51\xf7\x7c\xab\x91\x5b\xae\x19\x2a\x39\x8f\x45\xd8\x26\x6d\x00\xb1\x90\x9c\xe9\xee\xd5\x06\x08\x9e\xd4\x8b\x60\x71\xf7\x90\x4d\x2c
\x2d\xf6\xad\xe3\x79\xce\x51\xa6\x35\x5b\x77\xbc\x6d\x4a\x3c\x81\xe9\x18\xd7\x62\x86\xdb\x71\x48\x32\x63\x61\xc6\x21\x51\x52\x59\x25\x45\xc8\x62\xb2\xdc\x28\x34\x28\x92\x4d\x1e\x95\x6b\x1a\x2e\xa3\x01\x4f\x52\xbb\xae\xe9\xa1\xf6\x40\xd4\xfc\x7e\x1b\xf1\xbf\x1d\xe3\x84\xe5\x49\x27\xed\x39\xf5\x32\x4b\x66\x85\xe2\xa8\xff\x39\x83\x72\x04\x91\xca\x66\x71\x17\x12\x2d\x59\x9a\xc7\x8a\xd9\x3f\x7f\xd3\x94\xea\xc6\xde\x7d\x96\x60\x6f\x97\x83\x5d\x65\x7b\x3b\x94\x2e\xf1\xce\x67\xfc\xeb\x24\x7c\xcb\x79\xdb\x26\xde\x0b\xad\x56\x76\xf9\x1d\x0b\xad\xd2\x9b\x85\x7c\xe3\x36\x3f\xb9\xc9\x8d\x2d\x78\x5b\x59\xee\xa9\xc3\x70\xee\x4f\xc0\x82\xd4\x24\xc6\x0f\x4c\xc2\x41\x5b\xee\x65\x96\x7c\x27\xa4\xb0\x7c\x83\x80\x94\x64\x08\x69\xf9\x62\x2b\x1d\x42\x36\x05\xb0\x93\x8c\x8b\xc6\x92\x9f\x44\xca\xb8\x4d\x8a\x09\x59\xcc\xff\x0d\xdb\x70\x8d\xeb\x7c\x09\xa4\x6b\x62\xf9\xe9\x27\xf3\x6c\x8b\x74\xef\x74\x28\xb7\x01\x68\x9d\x47\x87\xb4\x3f\x8a\x1f\x7d\x12\x9f\xb4\x26\x9f\x73\x1c\xff\x1f\xc9\xb2\x9a\xcf\x4d\x1e\x56\x77\x11\xf0\xc5\x84\xf9\x1d\x2d\xf4\x14\xda\x67\x6a\xc5\xb5\xb3\xe6\xb9\x97\x35\x17\xda\xd8\x0d\x62\x00\xb0\x12\x91\x5d\xfe\x1b\xd0\xff\x09\xd7\xf9\x5c\xa6\x3b\x57\xf2\x46\x24\xbc\xed\x8c\x17\x51\x61\x0b\x43\xe6\xa3\x87\xcd\x8e\xfa\x71\x01\xb7\x32\x06\x03\x96\x77\x32\x6e\xbb\x17\x75\x77\x3c\xb3\x69\x66\x41\xc9\x78\xed\x3c\x72\xc2\x11\x03\x4c\x5c\xd2\x58\x96\xa4\xf9\x46\x38\x6f\x1b\x47\x51\x8a\x8f\xc7\x0d\x67\xdc\x67\x01\x67\x1c\x52\xcd\x0d\x97\x96\x72\x52\x2a\x8e\xb8\xf6\x73\x6b\x06\xf5\x71\x70\x9f\xcd\xb8\x96\xdc\x72\x33\x10\x49\x92\x59\x36\x8b\x79\x03\xdb\x2a\xae\x4f\xb3\xac\xc1\x96\x93\x72\xf2\xd3\xd1\xc8\xa4\xfa\xc2\x91\x2c\x8c\xc7\xbb\x0f\xab\xa5\x08\x97\x2e\xd0\xe0\x11\x08\x09\x91\x0a\xb3\x84\x4b\x97\x3b\xab\x8b\x25\x72\x31\x61\x8f\x22\xc9\x12\x88\xb9\x5c\xd8\x65\xce\xc1\xea\x0a\xc2\xc0\x5f\xc6\xe3\x31\x84\x4b\x46\x41\x84\xae\x30\x27\x12\x06\x79\x11\xb5\x49\x9e\x29\x15\x73\x26\xb7\xd0\xec\xa7\x3e\x4d\xf0\xe9\x1c\x0c\xb7\x18\xe1\xdc\xe8\x8c\xf7\x11\x41\x59\x25\xda\xa7\x34\x09\x1a\x65
\x0b\xab\x34\x62\x2c\xa5\xb8\xa1\x0d\x5f\x70\xc9\x35\x85\x48\x72\x0d\xa9\x12\xb2\xea\x38\xb9\x98\xf5\xe3\xf7\xee\xbb\x6a\xac\xdb\xa2\xe2\xf9\x95\x0f\x11\x87\x30\x91\xf0\x0b\x8b\x1e\x98\x0c\x79\x44\x71\xa2\x5f\xf2\xd7\xbd\xa5\xb5\xa9\x39\x1a\x8d\xc2\x58\x65\xd1\x70\x41\x09\xf1\x61\xa8\x92\x51\xec\xd3\x98\x91\x0a\xcd\xe8\x41\xf0\xd5\x28\x07\x70\xe7\xe6\x9a\xfd\x1a\xb1\xf5\xcd\xb7\x0a\x12\x66\xc3\x25\x2e\x06\x5c\x5a\x0c\x16\x87\x30\x7d\x64\x49\x8a\xe2\xdb\xcb\xb3\x9d\x43\x24\xf4\xf5\x82\xf1\x3b\x96\xa6\x30\xb9\x38\xa9\xc1\x34\xfc\x81\x6b\x61\xd7\xff\xf5\x7a\x7a\x75\xf5\xee\xaa\xb7\x45\x6a\x1c\x4e\xb8\xfe\xe1\xb8\x21\x31\xcf\x0b\x98\x31\x9b\xf1\x78\xea\x02\x52\xa5\x77\x09\xf6\x59\x14\x09\x27\x0b\x97\x1b\x4c\xdd\x6e\x3b\x75\x56\x5f\x79\x97\x93\x96\xb0\x14\xe6\x5a\x25\xc0\x1c\xde\x70\xcf\xd7\x7e\x1d\x4a\x7f\xca\x3c\xb4\x56\x45\xf6\xbb\x7e\x86\x3b\xf6\xc4\xcf\x40\x75\xc9\x0a\xe0\x7e\x87\xd6\x4e\x5d\x51\x16\x80\x19\x23\x16\x12\x18\xa5\x00\xea\x5e\x12\xa1\x42\x81\xdd\x10\xa6\x0c\x77\xb8\x44\xce\x65\xd5\xdd\xd9\xa7\x14\x39\xbe\xca\x15\x8c\xd2\x2e\x10\x5c\xb2\x87\x3a\x48\x86\x0b\x19\x15\x0a\x66\x79\xd4\x49\x94\x83\x87\x87\x8e\xa5\x4e\x01\x9b\xb5\xb4\xec\xb1\x91\x63\x68\xf8\xe1\xd5\xf9\x2e\x97\x61\x58\xc2\x91\xa8\xbc\x0c\x10\x10\x1d\x77\xc5\xf4\xc0\xb1\x60\xe8\x43\x1d\x9f\xb8\xa8\x01\x75\xe9\x12\x61\x20\x54\xf2\x81\x6b\xeb\xf8\x8a\xc0\x28\x85\x12\xf1\xb9\x90\x25\x03\x1c\x6f\xa2\x82\x03\xa4\x50\x9a\x2c\xf5\x15\x8c\x0a\x21\xa4\x5c\x75\x09\xd5\xad\x45\x94\xcc\x99\x88\x4d\xbf\x02\x7c\x25\xe2\xb8\x83\xa7\x88\x09\xcb\x62\x9b\x6f\xd5\x0d\x69\xd6\xca\x23\x62\x02\xcb\x05\xca\xc1\x12\x86\xc4\x0a\xe3\x6d\xff\xa2\x5f\x83\x4a\x53\x64\xee\xaf\xe5\x93\xac\x81\x71\x9f\xc4\xc6\x81\xf4\xea\xb7\xf2\x3a\xa0\x4c\x61\x30\x84\x0b\x65\xeb\x78\xda\x25\xb3\x48\x8c\xe6\x94\xd4\xca\xd2\x34\xf7\x6c\x0c\x28\xc7\xc3\xfc\xbc\x3b\x2f\x05\xb7\x9c\x00\x53\x71\x86\x06\x74\xb8\x2f\x6a\x0e\xc8\xc6\x07\x4e\x06\x1a\x0c\xa7\x64\x15\x2d\x86\xcb\xb0\x38\x56\x2b\xb7\x4b\x2c\xcf\x10\x96\xda\xd8\x69\xf6\x52\x6c\x3f\x3e\x27
\x78\xde\x80\xb0\x43\x5a\x70\xcb\x94\x2d\x99\x41\x6f\x84\x4a\x01\xab\x9e\xa4\xa2\x8c\xb8\x35\x31\x48\x22\x99\xc9\xe2\xec\xf6\xd1\x6c\x65\x86\x9b\x8a\x0c\xb5\x97\x21\xd8\x0c\x4e\xa6\x67\x37\x93\xae\x6c\xe3\xbd\x90\x51\x1f\x4e\x2f\x6e\xfe\xfc\x4d\x25\xc3\xd8\x77\xf3\xa4\xaa\x6e\x21\x83\x4c\x0a\x8b\x5b\xd6\x3b\xe8\x0d\xe1\x3a\x0b\x97\x0d\x51\xf6\x30\x43\x95\x49\x6b\x2a\x5b\x4e\xb2\x50\xda\x17\x67\x71\x48\x3d\xe2\xd9\x76\x16\x21\xa8\xa8\x81\xb6\x07\x12\xe0\x7e\x05\x7d\x08\x10\xbb\xc0\x09\x71\x50\xe1\xb7\x57\x08\x26\x3f\xcf\x81\x43\xe5\xae\xe4\x44\x50\xc7\x55\x73\x50\xa5\xb3\xe8\x00\x3a\x65\xe1\xca\x79\x3c\xf2\x59\x58\x5b\xac\xde\x5e\xb4\x7e\xe6\x48\x29\xe7\x0a\x35\x17\x97\x21\xdc\x54\x92\xae\xe5\x06\xe7\xbc\x22\x51\xd7\x3c\x54\x3a\xa2\x8d\xcc\xab\xae\x4d\x0b\xe1\x39\xe8\x12\x75\xfd\x7c\x47\xab\xbb\x58\x6c\xf5\x86\xa4\x31\xea\xf4\x7a\x76\x98\x3b\x73\xc3\x62\x25\x17\xf9\xec\xb6\xa6\x6d\x6a\xf7\x1a\x71\x68\x58\x3a\xcc\x8e\x1f\xd8\x16\xc7\xc2\xb0\xa0\xb4\xe4\x52\xe0\xe6\x48\xaf\xf3\x9b\x5a\x17\x0d\x16\x89\x5f\xb7\x1d\xf5\x53\x72\x7d\xef\xb7\x9f\x40\x96\x54\x98\xa0\xe1\xdc\x7b\xeb\x94\x8b\x09\xf2\xcf\xef\xaf\xa3\x9f\xc4\xec\x23\x65\x2a\x64\xd2\x47\x0c\x59\x1a\x31\x57\x37\x0f\x11\x25\x81\x71\x7b\xbc\x46\xe1\x9a\x8b\x45\x46\x8e\xde\x05\x5f\xe5\x67\x2b\x64\x12\x27\xb1\x28\x22\x4d\xd7\x50\xbb\x9d\xeb\xf6\x61\x96\x59\xe0\x8f\xc2\xd8\x82\x81\xa6\x82\x40\xa2\x22\x67\xdf\x5d\xdb\x42\xcb\x36\xd8\x25\x17\xba\xaa\xaf\x76\x49\xf6\x6f\x88\x5e\x60\xb3\x6f\x05\xbb\x46\x31\xb0\x2d\xc2\x6b\xae\x4d\xc1\x8d\x65\x02\x3d\xf9\x56\x94\xc3\x1b\x41\x8e\x67\x6e\xd6\xd4\xa9\xf8\xb7\x3d\xf0\x89\x84\x49\x63\xb6\xbe\x60\x49\x2b\x3b\xf6\x11\xe4\x96\x40\xb6\x53\x14\x2a\x19\x0a\xc3\xdd\x29\xc8\xdd\x9e\x4e\x52\x3c\x5e\x6d\x59\xc1\xbf\xcc\xa0\x6f\x2d\x2d\xd7\x73\x16\xa2\x27\x7f\x6b\xd0\xaa\x4a\xcb\x51\x14\x43\x66\x5c\xd3\x0a\x95\xdf\x25\x70\x19\xf9\x46\x0a\xa1\xa2\x3e\x2d\xcb\x9d\xe7\xdf\x02\xdc\xc3\x18\x85\x1b\xeb\x94\x7b\xaf\x1a\x37\xa3\xde\x54\xde\xda\x91\x5c\x0a\x8b\x8f\x50\xa9\x25\x09\x97\x91\xf3\xbc\x66
\x6d\x98\x18\xb0\x39\x4f\x65\x9d\x87\xd4\x2d\xb3\x88\x04\x0d\x1e\x84\x11\xb3\x98\xdc\x2b\x14\x67\xd3\x07\x93\x85\x4b\x60\xed\xbc\xd1\x7f\x67\xca\xb2\x61\x5b\x23\x6d\xc8\x13\x76\x15\x52\xba\x02\x82\x27\xd3\x7b\x48\x4a\xe9\xf0\x90\x52\xaf\x48\x9e\xab\xc8\xbb\x62\x1d\xba\x72\xbe\xa9\xa5\x43\x20\xcb\x06\x80\x6a\xec\x8a\xc8\x0e\xe1\xbb\x72\x7b\x9c\x5f\x19\xb0\x34\xe5\x72\x21\x24\xf7\xf1\x20\x4b\x85\xa1\x98\x10\x83\xc5\x11\xf5\x46\xe9\x91\xe6\x26\x55\xd2\xf0\xbb\x98\xa1\x14\x08\xde\x50\x86\x50\x3a\x03\x64\x27\x96\xcc\x14\x91\x4c\x2e\x88\xdf\xdf\xdc\x5c\x42\x0e\x08\x42\x15\xf1\x3e\x04\x05\x60\xfc\x1d\xf4\xdb\xdb\xab\xa8\x9f\x0a\xf9\x10\x2b\x75\x0f\xcc\x42\x81\x02\x41\x36\x59\x18\x72\x63\xe6\x59\x5c\xc0\xa6\xbe\xa0\xdf\xd0\x48\xcc\x3b\x92\xf5\xe5\x28\xe2\xf0\x9c\x74\x40\x77\x32\xf5\x89\x82\x54\xab\x18\xd5\xce\xe6\x6d\xa8\x43\x6d\x49\x30\xef\xee\x34\x76\x0a\xd5\xf6\x2c\xed\x56\xfd\x5b\x45\xad\x53\x2d\x35\xd0\x7b\x4a\x17\xb7\x17\x84\x09\x2c\xb3\x84\x49\xea\x1c\xa0\x4e\xb3\xaa\xea\xcd\xe5\x84\xa4\xa6\xab\x4a\xb8\x63\x82\xac\xfc\xbb\xe7\xeb\x2f\x43\xe4\x0f\xbc\xab\x52\xda\x95\xa1\xcf\xc3\xe3\x2f\x82\x3f\xb9\x11\x37\x88\xeb\x17\xa1\xe2\xc7\x1c\xdc\xf6\xc1\x3b\x89\x5d\x01\x6b\x2a\xb3\xe4\x69\xde\x3c\xbf\xc9\xa3\x5b\x35\x77\xf9\x88\xaa\x7a\x73\x09\x88\x32\xb8\xde\x22\x01\x00\x97\xca\x38\x45\xee\xdc\xd8\x23\x40\x4f\xf5\xe2\x6d\x1f\xde\xbc\x7b\x77\xe6\xa3\x90\x3e\x9c\xbc\xbb\x7d\x73\x36\xed\xd7\x1c\xd9\xb6\x76\x71\x7f\xe7\xef\x2e\xa6\x3f\x3f\xff\x12\x1b\x06\xc0\x65\x96\x74\xef\xd6\xc0\x23\xba\xe1\x25\x62\xbf\xe1\x15\x91\xb4\xe1\x9d\xa3\x73\xd3\xcb\x0a\xf1\x1b\x86\x10\xed\x0d\x53\x97\xc9\x70\x79\x6d\xd9\xe2\x33\x5c\x96\xb3\x12\xc8\xa6\x81\x4f\x8a\x59\x01\xa1\x43\xc4\xea\xc2\x55\x0f\x93\x1d\x01\x60\x70\x6a\xdd\x9f\x73\x09\x1b\x4a\xf1\x0d\x0b\x39\x6a\x31\x26\x97\xab\xdb\x8b\xd3\xf3\xcb\xb3\xe9\xf9\xf4\xe2\x66\x7a\xd2\x87\xcb\xab\xe9\xd9\xe4\xf6\xe2\xf8\xfb\x3e\x4c\x27\x57\x67\x3f\xdf\x4d\x8e\x8f\xa7\xd7\xd7\x7d\x98\x9c\x5d\x7e\x3f\xe9\xc3\x9b\xe9\xcd\xa4\x0f\x6f\x27\x7d\x38\x99\x5e\x5e
\x4d\x8f\x27\x37\xd3\x93\xa6\x50\x75\x49\xc7\xa0\xbe\x50\xeb\x6d\xb1\x6e\xeb\x4d\x15\x8d\xd6\x4b\xc2\xaa\xf5\x14\x91\x6c\x3d\x7c\xdb\x7e\x54\x92\xd0\xb9\x7d\xfe\x1c\x0c\x32\x99\x6b\xf2\x8e\x23\x91\x70\xcb\xf0\xa8\x7f\x7a\x75\xf5\xdc\x43\xf8\x44\x11\xda\x30\x7d\x43\x86\x25\x1f\xdd\xf6\xf9\xa9\x7b\x36\x13\x11\xfe\x72\x32\xd5\x36\xe8\x65\x31\xe8\xd3\x19\xb6\xdd\x76\x0b\xb9\xe0\xc6\x9e\xf0\x98\x75\x1a\xb5\x27\x8c\x41\x83\xb3\xa7\x25\xb0\x27\x9c\x04\x9f\x4b\x8c\xd9\xba\x50\xdd\xae\xa0\x01\x21\x23\xe6\xcc\xd6\x1e\x35\x3a\x55\x27\x6d\x7e\x7b\xd2\xdc\x24\x57\xfb\xa2\x2a\x20\x39\xa7\xc8\x4f\xa6\x39\x2c\x32\xa6\x99\xb4\x3c\xf7\xf3\x3d\x4c\xee\x33\xd4\x0f\x4c\xc4\x6c\x53\x37\x8e\x9b\x80\x8c\xed\x63\xac\x1a\x67\x14\x90\x10\xaa\xb1\x32\x06\xa2\x8c\x53\x3a\x5c\x6b\xa5\x5b\x0d\x2c\x00\x86\xbc\xe1\x4b\x8a\x5f\x3e\x9f\xb3\xd7\x15\x68\xbb\xb0\x96\x56\x2f\x03\x28\x64\x72\xae\xac\x4a\x5e\x3b\xbf\xdd\x87\x37\x9d\x3c\x70\x42\x8b\x9c\x5c\x69\x61\x2d\x97\x1e\x9e\x6b\xbf\xea\x53\x4e\x8a\x87\x19\x65\x49\xab\x9b\x88\x33\x8c\x55\xba\x23\xb2\xc5\x3f\xca\xd7\x0a\xe3\x12\xab\x14\x12\x3e\xb0\x78\x17\x2e\xfb\xac\xe8\x06\x64\x5d\x9a\x68\x29\x16\x4b\xae\x61\xa1\x99\xcc\x62\xa6\x85\x5d\xe7\x59\x1d\x93\xb0\x38\xe6\xba\xc9\x9c\x61\x47\xf2\xf1\x07\x21\x5b\xbb\xb6\xbb\x9d\x3a\x2f\x60\x7c\xba\x8e\xf1\x00\x9e\xb4\x52\x3f\x2d\x79\x91\xfa\x4f\xf2\x6b\x00\x2e\x67\xe7\x62\x36\xcb\x24\x57\x99\x29\xf2\x73\xe1\x92\xe1\x21\x68\x2b\x1b\x05\xcc\x0d\xea\x03\xb7\xe1\x10\xae\x55\x82\x01\x55\x32\x13\xd2\x5f\x5c\x50\xf3\x27\xd2\x52\xed\xe8\x8d\x2e\xca\xf8\xd4\x8f\xc9\xd2\x54\x69\xcb\xa3\x61\xdb\xdd\x7a\x3b\xb9\x7d\x8b\x8e\xd5\xf4\x0c\xed\xdd\xf1\xed\xf9\xed\xd9\xe4\xe6\xf4\xc7\x69\xd3\xd6\x7d\x84\xeb\xd4\x6d\x16\x69\xa1\x0e\xcb\xd4\x4c\x42\xe3\xd3\x12\x8d\xba\x88\x28\x29\x48\xb8\xaf\x7c\x05\x12\x3d\xd6\x4f\x0f\xe3\xcf\x3b\xc1\x7d\x72\x0a\xea\x8a\xb3\x68\xe0\xda\x0c\x4e\xe7\x79\x87\x80\xaf\x3a\xb3\x6a\x2d\xa3\x52\x6c\x17\x11\x97\x96\xd2\x72\xed\xc0\x8f\x69\x9f\x23\x9c\xad\xcb\x3c\x7a\xd1\xeb\xcb\xda\xe8\x97\x62\xdc\x27\x7f
\xbc\x6d\x73\xda\xb9\x94\x66\x4a\x81\x0c\x25\x12\xe1\x1d\xf9\xda\x60\x25\xdb\x1a\x3b\xf7\xc8\x72\x64\x8a\xbb\x30\x04\xce\x50\x47\x27\x8f\x60\xc9\x35\x6f\xdb\xd2\x9d\x45\x6a\x97\x46\xd1\xad\xc1\xf9\x06\x7d\xdf\xd0\x0b\x1d\xa3\xe4\x67\xe5\xfa\x3a\x92\x7c\xbb\x0b\x14\xda\x93\x82\x9b\x94\x04\x6c\xba\xbf\x45\x69\xf2\x53\x39\x6b\x3b\xa2\xd1\xdd\xa9\xeb\x08\x3e\x77\xa7\xee\x79\xa5\x06\xe6\xea\x4b\x42\xe6\x66\x48\x58\x03\x27\x17\xd7\x8e\xe8\x54\xf3\xb9\x78\x74\xc1\x80\xed\x0a\x77\x85\xeb\xd2\xb8\xbd\x3a\x1b\x70\x19\xaa\x88\x9a\x26\xe2\xd8\xe5\x09\xf3\xa2\x6e\x65\x25\xe3\xec\x12\x72\x32\x5f\xa4\x05\x33\x08\x33\x63\x55\xd2\x48\xa2\x05\xa0\x34\x04\xfc\xd1\x72\x8d\x4e\x66\xe3\x65\x6e\x25\xdd\x22\x6d\xc7\x64\xa9\xb2\x38\xa2\x3b\x84\x0c\x24\xb3\x99\x66\x31\x2c\x05\xd7\x4c\x87\x4b\x34\xec\xb0\xd0\x2a\x4b\x85\x5c\xd4\xf2\x7a\x47\xd0\xeb\xc4\x65\x24\xe4\x83\x12\x21\x1f\xa5\x4c\x44\x23\x96\x50\x0e\xb6\x9d\xa3\xdd\x80\xec\x28\xd5\x2a\x41\x03\x96\x99\x51\x96\xf6\xa0\xf7\x89\x79\xc3\xde\x27\xdb\x89\x4c\x8a\x56\x57\xde\xee\xa2\x77\x2b\x45\xf3\xfc\xb7\xa5\x0b\x97\xa0\x52\x8d\xd3\xb7\x95\xa3\x53\x74\x02\x68\x9e\x5b\xc6\x53\x0b\xa2\xbd\x69\xa4\x0a\x2b\x77\x29\xc5\xbc\xda\x7c\xe0\x4a\x42\xc2\x40\x40\x41\x7d\xd0\x87\xc0\x45\xf0\x41\x9f\x24\xa5\x1a\xb1\x07\x6d\x57\x95\xca\x4d\x88\x64\xe0\x7b\x0f\x5c\xd9\x4a\x73\x6f\x3c\x58\xb5\x86\xe1\xdc\xba\x1a\x01\x1d\xde\xef\x89\x98\xcf\xb9\xe6\xd2\x82\x59\x1b\x54\x88\xde\x13\xa0\x8e\xde\xb2\x28\x66\xbc\x97\x9d\x28\xcd\x81\x33\x23\xe2\x75\x59\x44\x68\x01\xdd\x33\x85\x8b\x42\xae\xc8\x78\x38\x3e\xbc\x7f\xb3\x0e\xe0\x8e\x80\xdf\xd5\x4b\x10\xcc\x40\x70\x38\x7e\xb3\xf6\x25\x59\x3f\xb1\xcb\x70\x04\x2f\x5f\x1d\xbe\xdc\x0e\xe8\xe5\xf0\xd5\xf9\x9b\x75\xb0\x3f\x84\xef\xd5\x8a\x3f\x70\xdd\x2f\xb6\x80\x18\xd7\xa5\x0c\x82\x7b\x5a\xdc\xe6\x77\x6b\x0a\xc4\x2b\x02\x80\xa6\x30\x5e\xb1\xb5\xaf\xe4\xa9\xcc\x30\x6a\x5b\x98\xc3\x6c\x6d\xb9\x69\xe7\x96\x24\x35\x45\x59\xae\x61\xa9\x56\x20\xac\xe7\x6b\x15\x61\xb2\xfb\x6b\x95\xc1\x8a\x49\x0b\x0c\xdc\xb1\x2d\x34\x8f\xea\xc8\x5d\xa3
\xbf\xe8\x9b\x47\x58\x68\x2b\xa5\xf1\xe3\xcb\xdb\x81\xe1\xa1\x42\xa4\xf2\xb8\x8c\xc1\x6f\x6a\xd6\x2f\x12\xe8\xae\x82\xdc\x02\xca\xa4\x97\xc6\x8a\x17\x15\xe4\x48\xac\x96\xca\x14\x32\x87\xac\x32\xef\x8f\x2f\x6f\x3f\x04\xb0\x87\x0a\xe7\xf7\x4c\x3c\xb0\x98\x4b\x1b\xb7\xc3\xc8\xe0\x20\x1f\x9a\x67\xe3\x03\x83\xdb\xe2\x7a\x62\x10\x33\xd7\x78\x70\x70\xd8\x1f\x8f\x5f\x55\xf1\x2f\x77\xa2\x3b\x37\x83\x78\xe4\x51\x0e\x6e\xf9\xc1\xe1\x78\xfc\x2a\x40\x15\x4e\x9a\x0b\x63\x1c\x0c\x79\xc4\x66\xde\x76\xb9\xd4\x9e\xb7\x14\xd0\x50\xab\x08\x89\x7a\x1e\x9c\xc0\x8a\xad\x9b\x9c\x04\x96\x9f\xde\x2a\xe7\x36\xd5\x45\x5a\x9c\xbc\xf7\xfc\xe9\xe7\xbd\x2d\xee\x92\x75\xe5\xd0\x21\x69\x43\xa4\xad\x7d\xc0\x0a\x87\xd0\x51\x3f\x3a\x18\x8f\xc7\xc1\x3e\x29\x11\xb4\x17\xc1\x0f\x22\xe7\x3e\x02\x77\x70\x83\x83\x83\xe1\x7f\x1e\xbe\x0c\xfc\xec\xce\x93\x90\x43\x3b\xfc\x06\xf7\x8a\x02\xd4\x3c\x16\xf0\xda\x91\x5a\x6a\xc0\x64\x33\x5f\xae\xfa\x05\x07\xdd\xca\x6e\x9f\xf4\x58\x45\xae\x12\x79\x4b\x73\xd5\x1c\xce\x39\x33\x99\xe6\x65\x2f\x22\x41\x55\xf3\xc4\x3d\xa7\x7b\xeb\x59\x98\x25\xc3\xa5\x4d\xe2\x7d\xc0\xb8\x28\x62\xba\x23\x30\x7f\xf1\xe2\x0d\x33\x22\xf4\x58\xed\xdd\x5e\x9c\xde\xec\xbf\x78\x01\x2f\x20\x98\x21\x87\x67\xc2\xe2\xff\x51\x57\xe0\x01\xc5\xff\x9b\x00\x9c\x84\xe1\x8f\x44\xc8\xae\x20\x48\x66\x6e\xec\x32\x80\xa5\xca\x34\xfe\x37\x0a\x20\x62\x6b\xfc\xdf\x41\x00\x91\x48\xb8\x34\x42\xb9\xfb\xf3\x2f\x5e\x5c\x92\xaf\xc1\x0d\xec\x5d\x5e\x4d\xbf\x3b\xfd\xdb\xfe\x8b\x17\x6d\x4c\x21\xb8\x0f\xe0\x5e\xc4\x0a\xf6\x0e\xc6\xff\x78\xb9\x8f\x4f\xce\xf1\x88\x2d\x18\x3d\xf9\x33\x3d\x79\x1b\xc0\x42\xf8\x27\x7f\xa5\x27\x37\x01\x58\xae\xdd\x93\x83\xc3\xfd\x2e\xc0\x97\x01\xa4\xdc\xfa\x21\xaf\x68\xd6\x34\x40\xe5\xe0\x9e\xfc\x85\x9e\xfc\x3d\x80\x3f\xb8\xf5\xa3\x0e\x0f\xe8\xd9\xcf\x01\xac\x95\xed\x48\xd6\xd0\x98\x6f\x68\x4c\x12\x40\x22\xe2\x58\xd0\xb3\x81\xc3\x3b\xc3\x67\xa1\x76\xa4\x0c\x1c\xe6\x32\x00\xc9\xa4\x7f\xf4\xd7\x4e\x3c\xd3\x00\x52\x11\xfa\x21\x07\x87\x34\x6d\x1e\xc0\x9c\x27\x36\x7f\xe8\xb0\x67\x01\x30\x5b\x3c\x73\xf8\xff
\xd1\xde\xa9\x3f\x78\x9a\x0f\xf2\x04\xad\x91\xa0\xb0\x78\xe8\x28\xf8\x41\x20\xe3\x67\x02\xf6\x0e\xff\x71\x30\x76\x8c\x17\xc8\xf9\x99\x68\x13\x7e\xf8\x8f\x43\x37\xe4\xad\xc0\xad\x70\xb3\x5e\xba\x47\x37\x02\xf7\xc2\x3d\xfa\xc6\x3d\xba\x14\xc8\x7b\xf7\xe8\x15\x3e\x7a\xf1\x56\xb3\x24\x61\xba\x43\x04\xf0\x8c\x2c\xdc\x5b\x60\xb1\x51\xde\x75\x75\xf6\x9b\xaa\xa1\x52\x72\xd7\xa6\x8a\xa0\x47\x28\x68\x0f\xc2\xf8\xd6\x40\x8d\x76\xbd\x8d\x2f\x33\xae\x3f\x6f\x2e\x1e\x41\xa5\x5c\x33\xab\xf4\x7e\xcd\x15\x34\x7d\xb2\x6d\xa3\xf7\x3c\x61\x22\x76\xaa\x38\x38\x17\x6f\xd6\xa3\x83\x71\xd2\x51\xc2\xdd\x63\x31\x9a\xb6\xc5\x92\x34\x9d\xf7\x3f\x59\x9c\x28\x63\x41\xa2\x31\x75\x8e\x70\x30\x32\x81\xd7\x91\x4e\xb7\x39\xad\xf6\x2d\x22\xba\xd9\x8f\xf5\xe6\x73\x46\xa9\x8a\x94\xbe\x09\xc1\x2c\xfc\x9e\x71\xbd\x76\xc1\x2e\xb5\xa8\x5a\x72\xc3\x22\xae\xe3\x35\x3a\xf6\x61\x96\x64\x31\xa9\xf4\xb6\x3f\xa0\x21\xe2\xb1\xf5\xee\xc2\xfe\x10\x19\x37\x0c\x20\xc9\x62\x2b\xc8\xfd\xca\x3b\x2b\xe9\x9a\xb6\xa1\xb4\x7f\x8d\x69\x6d\x88\x1b\x99\xf8\xf6\xcd\x7a\x18\x39\xf6\xdd\xbf\x5f\x31\x6b\x3f\x0c\x97\xc1\xb0\xb6\xad\xd4\x84\xd0\x02\x49\xdd\x6e\x14\x46\xc3\x5c\xc5\xb1\x5a\x19\xba\xce\x97\xf7\x1d\xbd\x86\xe3\xfc\x0e\xf9\x11\xbc\x87\xde\xb0\x57\x3e\x80\x0f\xf8\x64\xd4\x76\xd1\xab\x23\xbe\xad\xfc\x7a\x0d\x7b\xf0\x0b\x38\x3d\x04\xbf\x02\x6a\x44\xf8\x27\xf4\xbe\xee\xc1\x3e\xfc\x02\x13\x29\x95\x77\x10\x7f\x6d\x81\xfc\x67\xf5\xf5\x3f\xa1\x77\xd0\x83\x6f\xab\x8f\x5e\x43\xef\x7d\x0f\x2e\x26\xe7\x53\xe8\x7d\xc0\x77\x17\xca\x72\x27\xab\xe5\xa8\x4e\xdf\x8a\x2c\x3f\x03\xd7\xc6\x61\xd1\x28\x0b\x9b\xb3\x02\x0d\x28\x62\x19\x14\x5e\x01\x2b\x97\xf4\x8d\xcc\x6d\x77\x25\x56\x92\x57\x9c\xb5\x9c\xc3\xa5\x33\x82\xd6\x3c\x38\x08\x5a\x9b\xf8\x5e\xbb\xf6\x93\x0f\xa3\xb6\x8c\xbe\x7e\x0d\x07\x23\x83\xfe\xf8\x9b\xf5\x7b\xab\x99\x34\x09\x3a\x18\xd1\x87\x91\xc1\x77\x6f\xd6\x23\x13\x90\x84\x21\x0f\xc8\x7e\x33\x30\x08\xce\x77\x5a\x48\x25\x07\xb3\x98\xc9\xfb\x76\x5a\x46\x0b\x49\x21\x0d\x4c\xae\x8f\x4f\x4f\x2b\xbd\xea\x14\x84\xfa\xbb\xf9\x28\xeb\xc1\x7b
\x27\x61\x1f\xdc\x4a\x07\x41\xe9\xd8\xb7\x11\x76\x8d\x94\x4c\xaf\xe1\x97\xba\x4d\xc2\xc7\xa5\x75\xe5\x72\xb8\x12\xf7\x22\xe5\x91\x60\x64\x5b\xf1\xd7\xe8\xa4\x3a\xe3\xee\xf7\x8c\x49\x2b\xec\xba\xad\xb8\xd5\x1c\x0e\x8a\x3e\x19\x3c\xf1\xc1\x01\xf1\x81\x42\x1f\x0c\xbe\xfc\xed\x5e\x72\x38\xe9\x6a\xba\x54\xb2\xf0\x9c\x67\x68\x9f\x3b\x4f\x84\xf7\x22\xd2\x54\xab\x54\x0b\x66\x9b\x8d\x29\x3d\xc9\x57\x14\x8d\x1b\x48\xb9\x46\xf3\xdb\xcb\x0b\x32\x05\x4b\xba\x44\x03\x1d\x98\x91\x3f\xa7\xef\x25\x5f\x0d\x08\xc6\x07\x7c\xb4\xe7\x42\x8b\x5a\x38\x17\xbc\x0a\x60\x45\xba\x29\xe1\x4c\x42\xef\x55\xdb\x89\xcf\xf1\xd8\x6f\xb9\x97\xbd\x5a\x10\x90\xb2\x05\x87\x07\xc1\x57\x15\x8c\x09\x74\x0b\x64\x9d\x06\x87\xf3\x78\x3c\xce\xd1\xbe\x2f\x08\xb8\x7f\x8f\x40\xef\x08\x68\x9b\x84\x0d\xae\x31\x06\x49\xaf\x86\x2f\x1b\x74\xbd\x1c\x8f\xbb\x30\x74\x3a\xf3\xeb\xad\x72\x56\x17\xae\x62\x91\x03\xf4\x38\x7d\xbc\xe6\xce\x2c\xcf\x13\xd7\xb0\x10\x0f\xee\xd3\x03\x29\xd7\x21\x46\xa5\xad\x22\xb0\x8b\x10\x9b\xe1\xb5\x93\x8a\x52\xaa\x7c\xcb\xa4\x66\xd2\x15\x72\xc7\xc3\x61\xb9\xe8\x26\x37\xdb\xef\xec\xcb\x80\x68\x37\xd0\x7b\x99\xa3\xe1\xc9\x45\xf7\x66\xf8\x35\xda\xaf\x08\x6d\x04\xb5\x3e\x6f\x60\xa9\x3f\x9b\x38\x82\x4c\x70\x7f\x13\x72\x88\x99\x4b\xa1\xba\x0e\x7d\x0c\x94\xbd\x19\xea\x10\xd3\xd9\x1a\x0e\xc6\x63\x22\xa3\x16\xbb\x56\x19\xe6\x43\xe8\x3a\x51\xe3\xe1\x38\xa7\xab\x9d\xb7\xa9\xd2\x59\xcf\xb1\x6c\x6c\x23\xd9\x3d\x7b\xb2\xa9\x75\x64\xd7\x32\xc9\xb6\x76\x91\x6d\x55\x12\x8a\x09\x9c\xe1\x30\x95\xcb\x08\x7d\x60\xee\xd2\xbd\x90\x8b\x01\x55\xb2\xda\x27\x97\x22\xe3\x4f\x2f\x90\xb8\x70\xbd\x23\xaa\x7f\xb2\x40\xf2\x19\xfd\x28\x9d\x9d\x28\x9f\x5d\x48\xe9\xec\x3b\xe9\xec\x38\xe9\xee\x35\xd9\xd0\x65\xb2\xb5\xbf\xa4\xd9\x59\xd2\xcc\x8a\xef\x76\x91\xaa\x91\x0d\xdf\x74\xe1\x8d\x2e\x69\xc6\x82\x4b\x3b\x28\x5a\x89\xf2\xb4\x4d\x5e\x2f\xd1\xc5\x85\xb4\x46\xca\x3f\xe8\x51\xc9\xf2\xce\xf5\xa4\xa2\x07\xd0\x93\x0b\x21\x1f\x47\xde\x5b\x30\xbd\x32\x4f\x5b\x42\x73\xba\x8a\xbe\x15\x46\x25\xeb\x1a\xc8\x83\xda\x95\x34\x12\xad\x90\x84\x97\x7c
\x7e\x97\x22\xa4\x6b\x6c\xe4\x06\x91\x9b\x5b\x8c\x3e\x82\x60\x32\xf8\x3b\xa2\xc1\x06\x7f\x34\x1a\x22\x83\xf1\xe0\xaf\x65\xba\xc0\xb5\x80\xb2\xb8\xba\x54\x70\x37\x18\xf6\xff\xf4\x1f\x2f\x9e\x3f\xef\xef\xed\x7f\x3d\xf2\x2e\xea\x5c\xe9\x15\xd3\xd1\xc0\xc4\xcc\xd4\xc3\xfe\x62\x2a\xec\x05\xa3\x60\x1f\x22\x2e\x95\x53\x89\x79\x9e\x99\xca\xfe\x2e\xad\x2e\x78\x48\xd7\x06\xdc\x87\x63\xca\x46\xf1\x66\xa2\xc6\x5d\x4f\x2e\x41\x7b\x77\x00\x81\xd4\x2e\xb2\x94\xdc\x2c\xae\x7c\xb9\x5e\xe0\xd6\x2d\x03\xbf\xd0\x2f\x95\x8c\xfd\x53\x2e\xce\xa5\x53\x86\x6e\xb8\x90\x8b\xfd\x7a\xea\xb3\x48\x0e\x16\x1f\xd4\x69\x23\xc5\xd2\x94\x33\x6d\xf2\xcb\x01\xc1\x2f\xe7\xd3\x9b\xab\xd3\xe3\xbb\xd3\x93\x5f\x03\xaa\xb6\xd5\x20\xd2\xf7\xb6\x72\x9d\xe2\xee\x7a\x4c\x2e\x4f\xcb\x6f\x4f\x55\x5c\xd6\xf6\x52\xf9\x47\x53\xaa\x25\x89\x1a\xf4\xe2\x32\x65\xd0\x2b\x3e\x14\x95\xac\x07\xfe\xff\xc5\x77\xa2\x48\x72\xbf\x3e\xfc\xae\x22\xbb\xcf\x6b\xc7\x6b\x07\x55\xe2\x61\x7e\xfc\x69\xbd\xac\x7d\x7d\xa9\x75\x60\x37\x56\xa7\xfc\x82\x95\xbc\x7b\x71\xdb\xa5\xbb\xaf\x66\x47\x8d\x98\x7f\xa5\x8a\xf2\xdb\x61\xfd\xdc\x0f\x0a\x54\x8e\xe0\x38\x56\x59\x94\xff\x4c\x98\x64\x0b\xae\x47\x6d\x62\xfc\xed\x9c\xa3\x76\xfd\xc7\x7d\x66\xab\xb1\xbe\xbb\xbf\xf1\xe5\x6f\xb6\xdf\x16\x70\x2b\x63\x3e\xf1\x66\x7b\xcc\x8c\xf5\x88\x6e\xbf\xdc\xde\x66\xc3\xbf\xe6\x72\x3b\xd9\xd0\xe2\xe6\xec\xc7\x8b\xe0\x8f\xb5\xf9\x1b\x4d\x47\xf5\xe6\x6d\xfb\xb2\x92\x30\x8d\xef\x6d\xe5\x9f\xd2\xda\x78\xb1\xaa\xe3\x4a\x56\x79\xfd\xb6\x5e\x43\xf1\x69\xe6\xe6\x75\xdc\x21\xdc\xac\xea\x46\x24\xff\xde\x9d\xef\xd1\x29\xb2\xb0\xc8\x64\xe7\x0f\x96\xf7\x47\x8f\x20\x98\xfe\xed\xe6\x6a\x72\x7c\xb3\x47\x9b\xb3\xef\x22\x88\xab\xe9\xdb\xe9\xdf\x2e\xef\x6a\xaf\xea\x16\x45\xf3\x05\x7f\xdc\xf7\x66\x82\xe9\x05\xdd\x63\xc1\x05\x8f\xe0\x60\x98\xcb\xfb\xcd\xb2\x7e\x5e\x9b\x77\x88\x91\x96\x1a\xd4\xb2\x7a\x56\xe4\xeb\x5d\xf5\xa8\xb8\x65\x3b\x84\xc3\xa1\x5b\xfc\x08\x26\xf8\x1f\xca\xb1\x57\x2e\x64\x11\xcb\xeb\x95\xe7\x25\xcf\x3f\xe1\x7a\x35\x3d\xcc\xef\x04\x17\x56\x60\x21\xec\x32\x9b\x51\x01\xd2
\xd5\x23\x47\x9a\x1f\x3a\x53\x70\x4d\x43\xf7\xa9\x25\xa1\x06\x92\x01\xae\x12\x73\x08\x59\x6a\x33\xca\xf6\xab\x2c\xed\xbe\x3a\x5d\xf9\x90\xa8\xbb\x55\x5f\x65\x41\xfb\xae\x61\xad\x84\x54\xdc\x70\xa9\x5d\x23\x2e\xae\xe3\xce\xf8\x5c\xb9\x70\xd8\xe5\xbc\x5c\x49\x6f\xc1\x1f\xeb\x60\x4f\x73\x27\x98\xfc\x15\x84\xe0\xf0\x59\x63\x78\x82\xc3\x5d\x00\x52\x7c\x84\x20\x77\x37\xa8\x52\x14\xaf\x01\x23\x73\x4f\x68\x0d\x2e\x11\x3d\xcc\xd5\x73\x16\xdb\xc6\x45\x6b\xdc\x8e\xdc\x3e\x79\x02\xc8\xdf\x25\x12\xdc\xc7\x42\x5c\x3d\xbe\xce\x5b\x67\x34\xab\x47\x26\xcf\x00\xe6\x5d\x4f\x7e\xae\xaf\x4c\xc2\xe9\xbc\xfb\x72\x74\xdd\xca\xba\xb7\x95\x3b\xd2\x56\xe5\x80\xca\xdb\xd2\x54\xa1\x33\xbc\x1a\x56\x22\x47\xdc\xc2\x8d\x88\xcc\x07\x72\x55\x44\x2b\xdf\x2e\x68\x9e\xa1\xdf\x8c\x92\x97\x6c\x1d\x2b\x16\x0d\xbd\x91\xed\x43\x6f\xf8\x22\xcf\xa1\xbc\xde\x8b\xfe\xb4\x3f\x7c\xd1\xdb\x0f\x9e\x3f\xfb\x9f\x00\x00\x00\xff\xff\xac\x19\x08\xea\x82\x58\x00\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x3c\x6b\x73\x1b\x37\x92\xdf\xfd\x2b\xfa\x98\x4a\x59\xf2\xf2\x25\x39\xde\xdb\x55\xca\x57\x45\x4b\x8c\xa3\x8a\x24\xeb\xf4\x48\x36\x9b\xca\x6a\xc0\x19\x90\x44\x34\x03\x4c\x00\x8c\x28\xc6\xeb\xff\x7e\xd5\x0d\xcc\x7b\x48\xd1\x8f\xdd\xbb\x3a\x7d\xb1\x39\x03\x34\xba\x1b\x8d\x7e\x63\xbe\x82\x63\x95\xae\xb5\x58\x2c\x2d\x1c\x8e\x0f\x0f\xe1\xad\x52\x8b\x98\xc3\xd9\xd9\xf1\x10\x26\x71\x0c\x57\xf8\xca\xc0\x15\x37\x5c\x3f\xf0\x68\xf8\xec\x2b\x78\xf6\x15\x9c\x89\x90\x4b\xc3\x23\xc8\x64\xc4\x35\xd8\x25\x87\x49\xca\xc2\x25\xcf\xdf\xf4\xe1\x47\xae\x8d\x50\x12\x0e\x87\x63\xd8\xc3\x01\x3d\xff\xaa\xb7\xff\xed\xb3\xaf\x60\xad\x32\x48\xd8\x1a\xa4\xb2\x90\x19\x0e\x76\x29\x0c\xcc\x45\xcc\x81\x3f\x86\x3c\xb5\x20\x24\x84\x2a\x49\x63\xc1\x64\xc8\x61\x25\xec\x92\x96\xf1\x40\x10\x8f\x9f\x3d\x08\x35\xb3\x4c\x48\x60\x10\xaa\x74\x0d\x6a\x5e\x1d\x07\xcc\x3a\x8c\xf1\x6f\x69\x6d\x7a\x34\x1a\xad\x56\xab\x21\x23\x6c\x87\x4a\x2f\x46\xb1\x1b\x69\x46\x67\xa7\xc7\xd3\x8b\xeb\xe9\xe0\x70\x38\x76\x73\x6e\x65\xcc\x8d\x01\xcd\x7
f\xcf\x84\xe6\x11\xcc\xd6\xc0\xd2\x34\x16\x21\x9b\xc5\x1c\x62\xb6\x02\xa5\x81\x2d\x34\xe7\x11\x58\x85\x18\xaf\xb4\xb0\x42\x2e\xfa\x60\xd4\xdc\xae\x98\xe6\xcf\xbe\x82\x48\x18\xab\xc5\x2c\xb3\x35\x76\xe5\xf8\x09\x53\x1b\xa0\x24\x30\x09\xbd\xc9\x35\x9c\x5e\xf7\xe0\xcd\xe4\xfa\xf4\xba\xff\xec\x2b\xf8\xe9\xf4\xe6\xfb\x77\xb7\x37\xf0\xd3\xe4\xea\x6a\x72\x71\x73\x3a\xbd\x86\x77\x57\x70\xfc\xee\xe2\xe4\xf4\xe6\xf4\xdd\xc5\x35\xbc\xfb\x0e\x26\x17\x3f\xc3\x0f\xa7\x17\x27\x7d\xe0\xc2\x2e\xb9\x06\xfe\x98\x6a\xc4\x5f\x69\x10\xc8\x48\xb7\x7b\xd7\x9c\xd7\x10\x98\x2b\x87\x90\x49\x79\x28\xe6\x22\x84\x98\xc9\x45\xc6\x16\x1c\x16\xea\x81\x6b\x29\xe4\x02\x52\xae\x13\x61\x70\x3b\x0d\x30\x19\x3d\xfb\x0a\x62\x91\x08\xcb\x2c\x3d\x69\x11\x35\x7c\x26\xe4\x5c\x1d\x3d\x03\xb0\xc2\xc6\xfc\x08\xce\xd4\x62\x21\xe4\x62\x74\xa6\x16\xe7\xdc\x6a\x11\x3e\x03\x88\xb8\x09\xb5\x48\x11\xc4\x11\xdc\xe0\x64\x37\x08\x8a\x41\xa0\xb9\x51\x99\x0e\xf9\x33\x80\xc7\x41\x14\xc6\x03\x63\x75\x16\xda\x81\x64\x89\x03\x5a\x00\x73\xaf\x97\xcc\x0c\x04\x4b\x8e\x60\xce\x62\xc3\x9f\xa5\xcc\x2e\x0d\x62\xb1\xe0\x16\xff\xe9\x58\x73\x9e\xc9\x10\x7f\xa1\x08\xd2\x1e\x2e\x38\x4a\xde\x5c\xe9\x84\x88\x03\x36\x53\x99\x05\x56\x5b\x0c\x20\x65\x9a\x25\xdc\x72\x6d\x1c\xdc\x01\xb4\x51\xc2\xbf\x5c\x72\x8e\xc0\xea\x8c\xfb\x87\x35\x24\x26\x30\xcf\xe2\x18\x84\x34\x96\xe4\x5c\xcd\x1b\x8b\xa1\xc0\xad\x77\xc5\x9e\x06\xff\xdf\xc2\x3f\xe2\x31\xb7\x7c\x57\x02\xdc\xe8\xff\x7d\x7c\x27\x71\xfc\xb1\x28\xc7\xf1\x8e\x48\xa7\x5a\xfd\xc6\x43\xbb\x0d\x65\x13\x2e\x79\xc2\x8e\xfc\x2f\x00\xbb\x4e\xf9\x11\xa0\x96\x90\x8b\x67\x00\xb1\x30\x3b\x0b\x34\x8e\xed\x90\x88\x84\xc9\xf5\xbf\x09\x5f\xd4\xe2\x4a\x72\x69\x09\xae\x1b\xea\x97\x28\x10\xc8\xa7\x96\xfa\xa2\xb6\xaf\xee\x78\x8b\xa8\x40\xc6\x8c\xde\xbf\xf7\xff\xfd\xf0\x61\x94\xd0\x58\x7c\x86\x18\x7f\xf8\x50\x9b\x95\x19\x6e\x06\xc6\x32\xcb\x07\x4b\x21\x6d\x0d\x6f\x37\x22\x65\x9a\x4b\x3b\x08\x95\x44\x3b\xc2\x75\x93\xe4\x52\xb9\x84\x9a\x33\xcb\x3b\x40\x34\x34\x4f\xf5\x95\xe6\x2c\x1a\x58\x91\x70\x95\xd9\x23\x18\xd7\xde\xd
1\x79\xdd\xf4\xd2\x09\x56\xfb\xad\x63\xae\x9a\x75\x6d\x8a\xff\xed\xb6\xae\xf8\x31\x17\xb1\xe5\xba\xf8\x59\xa7\x2e\xd5\x2a\xe5\xda\x0a\x6e\xca\xfd\x9b\x65\xe1\x3d\xb7\xef\x48\xae\x2a\x8f\x3b\xd7\x2e\x11\x5e\x28\xaf\x99\xdf\x54\xa7\x77\x0d\x73\x60\x8a\x5d\xde\x34\xbe\x26\xdd\xee\x35\x8b\x87\x24\xe7\x81\x43\xf1\x4e\xb9\x49\x01\x30\xcd\x4b\x43\xbd\x5a\x72\x49\x06\x29\x56\x0b\x33\x98\x31\xc3\xa3\x0a\x58\x00\x27\x30\x68\x7b\x33\x83\x26\x87\xc1\xc9\xe9\xf5\xcd\xd5\xe9\x9b\x5b\xb4\xa6\xf0\xc0\xe2\x8c\x13\xad\x68\xed\x40\x58\x8f\xc9\x8c\x1b\x82\xea\xd6\xae\x41\x9c\xa9\x4c\x46\x4c\x0b\x6e\x8a\x93\xe7\x84\x05\x18\x2c\x85\xb1\x6a\xa1\x59\x92\xfb\x26\xfc\xd1\x6a\x16\xa2\xb9\xa7\x95\xcc\xb0\x02\xaa\x6b\x3b\xf0\x8f\x3f\xa2\xe3\x21\xac\xe3\x55\xe3\xe5\xc6\x8d\xe9\xd8\x9c\x69\x1d\xd0\xa6\xc1\xdb\xb6\x68\x3b\x88\x96\x4e\xca\x51\xf7\x7c\xab\x91\x5b\xae\x19\x2a\x39\x8f\x45\xd8\x26\x6d\x00\xb1\x90\x9c\xe9\xee\xd5\x06\x08\x9e\xd4\x8b\x60\x71\xf7\x90\x4d\x2c\x2d\xf6\xad\xe3\x79\xce\x51\xa6\x35\x5b\x77\xbc\x6d\x4a\x3c\x81\xe9\x18\xd7\x62\x86\xdb\x71\x48\x32\x63\x61\xc6\x21\x51\x52\x59\x25\x45\xc8\x62\xb2\xdc\x28\x34\x28\x92\x4d\x1e\x95\x6b\x1a\x2e\xa3\x01\x4f\x52\xbb\xae\xe9\xa1\xf6\x40\xd4\xfc\x7e\x1b\xf1\xbf\x1d\xe3\x84\xe5\x49\x27\xed\x39\xf5\x32\x4b\x66\x85\xe2\xa8\xff\x39\x83\x72\x04\x91\xca\x66\x71\x17\x12\x2d\x59\x9a\xc7\x8a\xd9\x3f\x7f\xd3\x94\xea\xc6\xde\x7d\x96\x60\x6f\x97\x83\x5d\x65\x7b\x3b\x94\x2e\xf1\xce\x67\xfc\xeb\x24\x7c\xcb\x79\xdb\x26\xde\x0b\xad\x56\x76\xf9\x1d\x0b\xad\xd2\x9b\x85\x7c\xe3\x36\x3f\xb9\xc9\x8d\x2d\x78\x5b\x59\xee\xa9\xc3\x70\xee\x4f\xc0\x82\xd4\x24\xc6\x0f\x4c\xc2\x41\x5b\xee\x65\x96\x7c\x27\xa4\xb0\x7c\x83\x80\x94\x64\x08\x69\xf9\x62\x2b\x1d\x42\x36\x05\xb0\x93\x8c\x8b\xc6\x92\x9f\x44\xca\xb8\x4d\x8a\x09\x59\xcc\xff\x0d\xdb\x70\x8d\xeb\x7c\x09\xa4\x6b\x62\xf9\xe9\x27\xf3\x6c\x8b\x74\xef\x74\x28\xb7\x01\x68\x9d\x47\x87\xb4\x3f\x8a\x1f\x7d\x12\x9f\xb4\x26\x9f\x73\x1c\xff\x1f\xc9\xb2\x9a\xcf\x4d\x1e\x56\x77\x11\xf0\xc5\x84\xf9\x1d\x2d\xf4\x14\xda\x67\x6a\xc
5\xb5\xb3\xe6\xb9\x97\x35\x17\xda\xd8\x0d\x62\x00\xb0\x12\x91\x5d\xfe\x1b\xd0\xff\x09\xd7\xf9\x5c\xa6\x3b\x57\xf2\x46\x24\xbc\xed\x8c\x17\x51\x61\x0b\x43\xe6\xa3\x87\xcd\x8e\xfa\x71\x01\xb7\x32\x06\x03\x96\x77\x32\x6e\xbb\x17\x75\x77\x3c\xb3\x69\x66\x41\xc9\x78\xed\x3c\x72\xc2\x11\x03\x4c\x5c\xd2\x58\x96\xa4\xf9\x46\x38\x6f\x1b\x47\x51\x8a\x8f\xc7\x0d\x67\xdc\x67\x01\x67\x1c\x52\xcd\x0d\x97\x96\x72\x52\x2a\x8e\xb8\xf6\x73\x6b\x06\xf5\x71\x70\x9f\xcd\xb8\x96\xdc\x72\x33\x10\x49\x92\x59\x36\x8b\x79\x03\xdb\x2a\xae\x4f\xb3\xac\xc1\x96\x93\x72\xf2\xd3\xd1\xc8\xa4\xfa\xc2\x91\x2c\x8c\xc7\xbb\x0f\xab\xa5\x08\x97\x2e\xd0\xe0\x11\x08\x09\x91\x0a\xb3\x84\x4b\x97\x3b\xab\x8b\x25\x72\x31\x61\x8f\x22\xc9\x12\x88\xb9\x5c\xd8\x65\xce\xc1\xea\x0a\xc2\xc0\x5f\xc6\xe3\x31\x84\x4b\x46\x41\x84\xae\x30\x27\x12\x06\x79\x11\xb5\x49\x9e\x29\x15\x73\x26\xb7\xd0\xec\xa7\x3e\x4d\xf0\xe9\x1c\x0c\xb7\x18\xe1\xdc\xe8\x8c\xf7\x11\x41\x59\x25\xda\xa7\x34\x09\x1a\x65\x0b\xab\x34\x62\x2c\xa5\xb8\xa1\x0d\x5f\x70\xc9\x35\x85\x48\x72\x0d\xa9\x12\xb2\xea\x38\xb9\x98\xf5\xe3\xf7\xee\xbb\x6a\xac\xdb\xa2\xe2\xf9\x95\x0f\x11\x87\x30\x91\xf0\x0b\x8b\x1e\x98\x0c\x79\x44\x71\xa2\x5f\xf2\xd7\xbd\xa5\xb5\xa9\x39\x1a\x8d\xc2\x58\x65\xd1\x70\x41\x09\xf1\x61\xa8\x92\x51\xec\xd3\x98\x91\x0a\xcd\xe8\x41\xf0\xd5\x28\x07\x70\xe7\xe6\x9a\xfd\x1a\xb1\xf5\xcd\xb7\x0a\x12\x66\xc3\x25\x2e\x06\x5c\x5a\x0c\x16\x87\x30\x7d\x64\x49\x8a\xe2\xdb\xcb\xb3\x9d\x43\x24\xf4\xf5\x82\xf1\x3b\x96\xa6\x30\xb9\x38\xa9\xc1\x34\xfc\x81\x6b\x61\xd7\xff\xf5\x7a\x7a\x75\xf5\xee\xaa\xb7\x45\x6a\x1c\x4e\xb8\xfe\xe1\xb8\x21\x31\xcf\x0b\x98\x31\x9b\xf1\x78\xea\x02\x52\xa5\x77\x09\xf6\x59\x14\x09\x27\x0b\x97\x1b\x4c\xdd\x6e\x3b\x75\x56\x5f\x79\x97\x93\x96\xb0\x14\xe6\x5a\x25\xc0\x1c\xde\x70\xcf\xd7\x7e\x1d\x4a\x7f\xca\x3c\xb4\x56\x45\xf6\xbb\x7e\x86\x3b\xf6\xc4\xcf\x40\x75\xc9\x0a\xe0\x7e\x87\xd6\x4e\x5d\x51\x16\x80\x19\x23\x16\x12\x18\xa5\x00\xea\x5e\x12\xa1\x42\x81\xdd\x10\xa6\x0c\x77\xb8\x44\xce\x65\xd5\xdd\xd9\xa7\x14\x39\xbe\xca\x15\x8c\xd2\x2
e\x10\x5c\xb2\x87\x3a\x48\x86\x0b\x19\x15\x0a\x66\x79\xd4\x49\x94\x83\x87\x87\x8e\xa5\x4e\x01\x9b\xb5\xb4\xec\xb1\x91\x63\x68\xf8\xe1\xd5\xf9\x2e\x97\x61\x58\xc2\x91\xa8\xbc\x0c\x10\x10\x1d\x77\xc5\xf4\xc0\xb1\x60\xe8\x43\x1d\x9f\xb8\xa8\x01\x75\xe9\x12\x61\x20\x54\xf2\x81\x6b\xeb\xf8\x8a\xc0\x28\x85\x12\xf1\xb9\x90\x25\x03\x1c\x6f\xa2\x82\x03\xa4\x50\x9a\x2c\xf5\x15\x8c\x0a\x21\xa4\x5c\x75\x09\xd5\xad\x45\x94\xcc\x99\x88\x4d\xbf\x02\x7c\x25\xe2\xb8\x83\xa7\x88\x09\xcb\x62\x9b\x6f\xd5\x0d\x69\xd6\xca\x23\x62\x02\xcb\x05\xca\xc1\x12\x86\xc4\x0a\xe3\x6d\xff\xa2\x5f\x83\x4a\x53\x64\xee\xaf\xe5\x93\xac\x81\x71\x9f\xc4\xc6\x81\xf4\xea\xb7\xf2\x3a\xa0\x4c\x61\x30\x84\x0b\x65\xeb\x78\xda\x25\xb3\x48\x8c\xe6\x94\xd4\xca\xd2\x34\xf7\x6c\x0c\x28\xc7\xc3\xfc\xbc\x3b\x2f\x05\xb7\x9c\x00\x53\x71\x86\x06\x74\xb8\x2f\x6a\x0e\xc8\xc6\x07\x4e\x06\x1a\x0c\xa7\x64\x15\x2d\x86\xcb\xb0\x38\x56\x2b\xb7\x4b\x2c\xcf\x10\x96\xda\xd8\x69\xf6\x52\x6c\x3f\x3e\x27\x78\xde\x80\xb0\x43\x5a\x70\xcb\x94\x2d\x99\x41\x6f\x84\x4a\x01\xab\x9e\xa4\xa2\x8c\xb8\x35\x31\x48\x22\x99\xc9\xe2\xec\xf6\xd1\x6c\x65\x86\x9b\x8a\x0c\xb5\x97\x21\xd8\x0c\x4e\xa6\x67\x37\x93\xae\x6c\xe3\xbd\x90\x51\x1f\x4e\x2f\x6e\xfe\xfc\x4d\x25\xc3\xd8\x77\xf3\xa4\xaa\x6e\x21\x83\x4c\x0a\x8b\x5b\xd6\x3b\xe8\x0d\xe1\x3a\x0b\x97\x0d\x51\xf6\x30\x43\x95\x49\x6b\x2a\x5b\x4e\xb2\x50\xda\x17\x67\x71\x48\x3d\xe2\xd9\x76\x16\x21\xa8\xa8\x81\xb6\x07\x12\xe0\x7e\x05\x7d\x08\x10\xbb\xc0\x09\x71\x50\xe1\xb7\x57\x08\x26\x3f\xcf\x81\x43\xe5\xae\xe4\x44\x50\xc7\x55\x73\x50\xa5\xb3\xe8\x00\x3a\x65\xe1\xca\x79\x3c\xf2\x59\x58\x5b\xac\xde\x5e\xb4\x7e\xe6\x48\x29\xe7\x0a\x35\x17\x97\x21\xdc\x54\x92\xae\xe5\x06\xe7\xbc\x22\x51\xd7\x3c\x54\x3a\xa2\x8d\xcc\xab\xae\x4d\x0b\xe1\x39\xe8\x12\x75\xfd\x7c\x47\xab\xbb\x58\x6c\xf5\x86\xa4\x31\xea\xf4\x7a\x76\x98\x3b\x73\xc3\x62\x25\x17\xf9\xec\xb6\xa6\x6d\x6a\xf7\x1a\x71\x68\x58\x3a\xcc\x8e\x1f\xd8\x16\xc7\xc2\xb0\xa0\xb4\xe4\x52\xe0\xe6\x48\xaf\xf3\x9b\x5a\x17\x0d\x16\x89\x5f\xb7\x1d\xf5\x53\x72\x7d\xef\xb7\x9
f\x40\x96\x54\x98\xa0\xe1\xdc\x7b\xeb\x94\x8b\x09\xf2\xcf\xef\xaf\xa3\x9f\xc4\xec\x23\x65\x2a\x64\xd2\x47\x0c\x59\x1a\x31\x57\x37\x0f\x11\x25\x81\x71\x7b\xbc\x46\xe1\x9a\x8b\x45\x46\x8e\xde\x05\x5f\xe5\x67\x2b\x64\x12\x27\xb1\x28\x22\x4d\xd7\x50\xbb\x9d\xeb\xf6\x61\x96\x59\xe0\x8f\xc2\xd8\x82\x81\xa6\x82\x40\xa2\x22\x67\xdf\x5d\xdb\x42\xcb\x36\xd8\x25\x17\xba\xaa\xaf\x76\x49\xf6\x6f\x88\x5e\x60\xb3\x6f\x05\xbb\x46\x31\xb0\x2d\xc2\x6b\xae\x4d\xc1\x8d\x65\x02\x3d\xf9\x56\x94\xc3\x1b\x41\x8e\x67\x6e\xd6\xd4\xa9\xf8\xb7\x3d\xf0\x89\x84\x49\x63\xb6\xbe\x60\x49\x2b\x3b\xf6\x11\xe4\x96\x40\xb6\x53\x14\x2a\x19\x0a\xc3\xdd\x29\xc8\xdd\x9e\x4e\x52\x3c\x5e\x6d\x59\xc1\xbf\xcc\xa0\x6f\x2d\x2d\xd7\x73\x16\xa2\x27\x7f\x6b\xd0\xaa\x4a\xcb\x51\x14\x43\x66\x5c\xd3\x0a\x95\xdf\x25\x70\x19\xf9\x46\x0a\xa1\xa2\x3e\x2d\xcb\x9d\xe7\xdf\x02\xdc\xc3\x18\x85\x1b\xeb\x94\x7b\xaf\x1a\x37\xa3\xde\x54\xde\xda\x91\x5c\x0a\x8b\x8f\x50\xa9\x25\x09\x97\x91\xf3\xbc\x66\x6d\x98\x18\xb0\x39\x4f\x65\x9d\x87\xd4\x2d\xb3\x88\x04\x0d\x1e\x84\x11\xb3\x98\xdc\x2b\x14\x67\xd3\x07\x93\x85\x4b\x60\xed\xbc\xd1\x7f\x67\xca\xb2\x61\x5b\x23\x6d\xc8\x13\x76\x15\x52\xba\x02\x82\x27\xd3\x7b\x48\x4a\xe9\xf0\x90\x52\xaf\x48\x9e\xab\xc8\xbb\x62\x1d\xba\x72\xbe\xa9\xa5\x43\x20\xcb\x06\x80\x6a\xec\x8a\xc8\x0e\xe1\xbb\x72\x7b\x9c\x5f\x19\xb0\x34\xe5\x72\x21\x24\xf7\xf1\x20\x4b\x85\xa1\x98\x10\x83\xc5\x11\xf5\x46\xe9\x91\xe6\x26\x55\xd2\xf0\xbb\x98\xa1\x14\x08\xde\x50\x86\x50\x3a\x03\x64\x27\x96\xcc\x14\x91\x4c\x2e\x88\xdf\xdf\xdc\x5c\x42\x0e\x08\x42\x15\xf1\x3e\x04\x05\x60\xfc\x1d\xf4\xdb\xdb\xab\xa8\x9f\x0a\xf9\x10\x2b\x75\x0f\xcc\x42\x81\x02\x41\x36\x59\x18\x72\x63\xe6\x59\x5c\xc0\xa6\xbe\xa0\xdf\xd0\x48\xcc\x3b\x92\xf5\xe5\x28\xe2\xf0\x9c\x74\x40\x77\x32\xf5\x89\x82\x54\xab\x18\xd5\xce\xe6\x6d\xa8\x43\x6d\x49\x30\xef\xee\x34\x76\x0a\xd5\xf6\x2c\xed\x56\xfd\x5b\x45\xad\x53\x2d\x35\xd0\x7b\x4a\x17\xb7\x17\x84\x09\x2c\xb3\x84\x49\xea\x1c\xa0\x4e\xb3\xaa\xea\xcd\xe5\x84\xa4\xa6\xab\x4a\xb8\x63\x82\xac\xfc\xbb\xe7\xeb\x2f\x4
3\xe4\x0f\xbc\xab\x52\xda\x95\xa1\xcf\xc3\xe3\x2f\x82\x3f\xb9\x11\x37\x88\xeb\x17\xa1\xe2\xc7\x1c\xdc\xf6\xc1\x3b\x89\x5d\x01\x6b\x2a\xb3\xe4\x69\xde\x3c\xbf\xc9\xa3\x5b\x35\x77\xf9\x88\xaa\x7a\x73\x09\x88\x32\xb8\xde\x22\x01\x00\x97\xca\x38\x45\xee\xdc\xd8\x23\x40\x4f\xf5\xe2\x6d\x1f\xde\xbc\x7b\x77\xe6\xa3\x90\x3e\x9c\xbc\xbb\x7d\x73\x36\xed\xd7\x1c\xd9\xb6\x76\x71\x7f\xe7\xef\x2e\xa6\x3f\x3f\xff\x12\x1b\x06\xc0\x65\x96\x74\xef\xd6\xc0\x23\xba\xe1\x25\x62\xbf\xe1\x15\x91\xb4\xe1\x9d\xa3\x73\xd3\xcb\x0a\xf1\x1b\x86\x10\xed\x0d\x53\x97\xc9\x70\x79\x6d\xd9\xe2\x33\x5c\x96\xb3\x12\xc8\xa6\x81\x4f\x8a\x59\x01\xa1\x43\xc4\xea\xc2\x55\x0f\x93\x1d\x01\x60\x70\x6a\xdd\x9f\x73\x09\x1b\x4a\xf1\x0d\x0b\x39\x6a\x31\x26\x97\xab\xdb\x8b\xd3\xf3\xcb\xb3\xe9\xf9\xf4\xe2\x66\x7a\xd2\x87\xcb\xab\xe9\xd9\xe4\xf6\xe2\xf8\xfb\x3e\x4c\x27\x57\x67\x3f\xdf\x4d\x8e\x8f\xa7\xd7\xd7\x7d\x98\x9c\x5d\x7e\x3f\xe9\xc3\x9b\xe9\xcd\xa4\x0f\x6f\x27\x7d\x38\x99\x5e\x5e\x4d\x8f\x27\x37\xd3\x93\xa6\x50\x75\x49\xc7\xa0\xbe\x50\xeb\x6d\xb1\x6e\xeb\x4d\x15\x8d\xd6\x4b\xc2\xaa\xf5\x14\x91\x6c\x3d\x7c\xdb\x7e\x54\x92\xd0\xb9\x7d\xfe\x1c\x0c\x32\x99\x6b\xf2\x8e\x23\x91\x70\xcb\xf0\xa8\x7f\x7a\x75\xf5\xdc\x43\xf8\x44\x11\xda\x30\x7d\x43\x86\x25\x1f\xdd\xf6\xf9\xa9\x7b\x36\x13\x11\xfe\x72\x32\xd5\x36\xe8\x65\x31\xe8\xd3\x19\xb6\xdd\x76\x0b\xb9\xe0\xc6\x9e\xf0\x98\x75\x1a\xb5\x27\x8c\x41\x83\xb3\xa7\x25\xb0\x27\x9c\x04\x9f\x4b\x8c\xd9\xba\x50\xdd\xae\xa0\x01\x21\x23\xe6\xcc\xd6\x1e\x35\x3a\x55\x27\x6d\x7e\x7b\xd2\xdc\x24\x57\xfb\xa2\x2a\x20\x39\xa7\xc8\x4f\xa6\x39\x2c\x32\xa6\x99\xb4\x3c\xf7\xf3\x3d\x4c\xee\x33\xd4\x0f\x4c\xc4\x6c\x53\x37\x8e\x9b\x80\x8c\xed\x63\xac\x1a\x67\x14\x90\x10\xaa\xb1\x32\x06\xa2\x8c\x53\x3a\x5c\x6b\xa5\x5b\x0d\x2c\x00\x86\xbc\xe1\x4b\x8a\x5f\x3e\x9f\xb3\xd7\x15\x68\xbb\xb0\x96\x56\x2f\x03\x28\x64\x72\xae\xac\x4a\x5e\x3b\xbf\xdd\x87\x37\x9d\x3c\x70\x42\x8b\x9c\x5c\x69\x61\x2d\x97\x1e\x9e\x6b\xbf\xea\x53\x4e\x8a\x87\x19\x65\x49\xab\x9b\x88\x33\x8c\x55\xba\x23\xb2\xc5\x3f\xca\xd7\x0a\xe
3\x12\xab\x14\x12\x3e\xb0\x78\x17\x2e\xfb\xac\xe8\x06\x64\x5d\x9a\x68\x29\x16\x4b\xae\x61\xa1\x99\xcc\x62\xa6\x85\x5d\xe7\x59\x1d\x93\xb0\x38\xe6\xba\xc9\x9c\x61\x47\xf2\xf1\x07\x21\x5b\xbb\xb6\xbb\x9d\x3a\x2f\x60\x7c\xba\x8e\xf1\x00\x9e\xb4\x52\x3f\x2d\x79\x91\xfa\x4f\xf2\x6b\x00\x2e\x67\xe7\x62\x36\xcb\x24\x57\x99\x29\xf2\x73\xe1\x92\xe1\x21\x68\x2b\x1b\x05\xcc\x0d\xea\x03\xb7\xe1\x10\xae\x55\x82\x01\x55\x32\x13\xd2\x5f\x5c\x50\xf3\x27\xd2\x52\xed\xe8\x8d\x2e\xca\xf8\xd4\x8f\xc9\xd2\x54\x69\xcb\xa3\x61\xdb\xdd\x7a\x3b\xb9\x7d\x8b\x8e\xd5\xf4\x0c\xed\xdd\xf1\xed\xf9\xed\xd9\xe4\xe6\xf4\xc7\x69\xd3\xd6\x7d\x84\xeb\xd4\x6d\x16\x69\xa1\x0e\xcb\xd4\x4c\x42\xe3\xd3\x12\x8d\xba\x88\x28\x29\x48\xb8\xaf\x7c\x05\x12\x3d\xd6\x4f\x0f\xe3\xcf\x3b\xc1\x7d\x72\x0a\xea\x8a\xb3\x68\xe0\xda\x0c\x4e\xe7\x79\x87\x80\xaf\x3a\xb3\x6a\x2d\xa3\x52\x6c\x17\x11\x97\x96\xd2\x72\xed\xc0\x8f\x69\x9f\x23\x9c\xad\xcb\x3c\x7a\xd1\xeb\xcb\xda\xe8\x97\x62\xdc\x27\x7f\xbc\x6d\x73\xda\xb9\x94\x66\x4a\x81\x0c\x25\x12\xe1\x1d\xf9\xda\x60\x25\xdb\x1a\x3b\xf7\xc8\x72\x64\x8a\xbb\x30\x04\xce\x50\x47\x27\x8f\x60\xc9\x35\x6f\xdb\xd2\x9d\x45\x6a\x97\x46\xd1\xad\xc1\xf9\x06\x7d\xdf\xd0\x0b\x1d\xa3\xe4\x67\xe5\xfa\x3a\x92\x7c\xbb\x0b\x14\xda\x93\x82\x9b\x94\x04\x6c\xba\xbf\x45\x69\xf2\x53\x39\x6b\x3b\xa2\xd1\xdd\xa9\xeb\x08\x3e\x77\xa7\xee\x79\xa5\x06\xe6\xea\x4b\x42\xe6\x66\x48\x58\x03\x27\x17\xd7\x8e\xe8\x54\xf3\xb9\x78\x74\xc1\x80\xed\x0a\x77\x85\xeb\xd2\xb8\xbd\x3a\x1b\x70\x19\xaa\x88\x9a\x26\xe2\xd8\xe5\x09\xf3\xa2\x6e\x65\x25\xe3\xec\x12\x72\x32\x5f\xa4\x05\x33\x08\x33\x63\x55\xd2\x48\xa2\x05\xa0\x34\x04\xfc\xd1\x72\x8d\x4e\x66\xe3\x65\x6e\x25\xdd\x22\x6d\xc7\x64\xa9\xb2\x38\xa2\x3b\x84\x0c\x24\xb3\x99\x66\x31\x2c\x05\xd7\x4c\x87\x4b\x34\xec\xb0\xd0\x2a\x4b\x85\x5c\xd4\xf2\x7a\x47\xd0\xeb\xc4\x65\x24\xe4\x83\x12\x21\x1f\xa5\x4c\x44\x23\x96\x50\x0e\xb6\x9d\xa3\xdd\x80\xec\x28\xd5\x2a\x41\x03\x96\x99\x51\x96\xf6\xa0\xf7\x89\x79\xc3\xde\x27\xdb\x89\x4c\x8a\x56\x57\xde\xee\xa2\x77\x2b\x45\xf3\xfc\xb7\xa5\x0
b\x97\xa0\x52\x8d\xd3\xb7\x95\xa3\x53\x74\x02\x68\x9e\x5b\xc6\x53\x0b\xa2\xbd\x69\xa4\x0a\x2b\x77\x29\xc5\xbc\xda\x7c\xe0\x4a\x42\xc2\x40\x40\x41\x7d\xd0\x87\xc0\x45\xf0\x41\x9f\x24\xa5\x1a\xb1\x07\x6d\x57\x95\xca\x4d\x88\x64\xe0\x7b\x0f\x5c\xd9\x4a\x73\x6f\x3c\x58\xb5\x86\xe1\xdc\xba\x1a\x01\x1d\xde\xef\x89\x98\xcf\xb9\xe6\xd2\x82\x59\x1b\x54\x88\xde\x13\xa0\x8e\xde\xb2\x28\x66\xbc\x97\x9d\x28\xcd\x81\x33\x23\xe2\x75\x59\x44\x68\x01\xdd\x33\x85\x8b\x42\xae\xc8\x78\x38\x3e\xbc\x7f\xb3\x0e\xe0\x8e\x80\xdf\xd5\x4b\x10\xcc\x40\x70\x38\x7e\xb3\xf6\x25\x59\x3f\xb1\xcb\x70\x04\x2f\x5f\x1d\xbe\xdc\x0e\xe8\xe5\xf0\xd5\xf9\x9b\x75\xb0\x3f\x84\xef\xd5\x8a\x3f\x70\xdd\x2f\xb6\x80\x18\xd7\xa5\x0c\x82\x7b\x5a\xdc\xe6\x77\x6b\x0a\xc4\x2b\x02\x80\xa6\x30\x5e\xb1\xb5\xaf\xe4\xa9\xcc\x30\x6a\x5b\x98\xc3\x6c\x6d\xb9\x69\xe7\x96\x24\x35\x45\x59\xae\x61\xa9\x56\x20\xac\xe7\x6b\x15\x61\xb2\xfb\x6b\x95\xc1\x8a\x49\x0b\x0c\xdc\xb1\x2d\x34\x8f\xea\xc8\x5d\xa3\xbf\xe8\x9b\x47\x58\x68\x2b\xa5\xf1\xe3\xcb\xdb\x81\xe1\xa1\x42\xa4\xf2\xb8\x8c\xc1\x6f\x6a\xd6\x2f\x12\xe8\xae\x82\xdc\x02\xca\xa4\x97\xc6\x8a\x17\x15\xe4\x48\xac\x96\xca\x14\x32\x87\xac\x32\xef\x8f\x2f\x6f\x3f\x04\xb0\x87\x0a\xe7\xf7\x4c\x3c\xb0\x98\x4b\x1b\xb7\xc3\xc8\xe0\x20\x1f\x9a\x67\xe3\x03\x83\xdb\xe2\x7a\x62\x10\x33\xd7\x78\x70\x70\xd8\x1f\x8f\x5f\x55\xf1\x2f\x77\xa2\x3b\x37\x83\x78\xe4\x51\x0e\x6e\xf9\xc1\xe1\x78\xfc\x2a\x40\x15\x4e\x9a\x0b\x63\x1c\x0c\x79\xc4\x66\xde\x76\xb9\xd4\x9e\xb7\x14\xd0\x50\xab\x08\x89\x7a\x1e\x9c\xc0\x8a\xad\x9b\x9c\x04\x96\x9f\xde\x2a\xe7\x36\xd5\x45\x5a\x9c\xbc\xf7\xfc\xe9\xe7\xbd\x2d\xee\x92\x75\xe5\xd0\x21\x69\x43\xa4\xad\x7d\xc0\x0a\x87\xd0\x51\x3f\x3a\x18\x8f\xc7\xc1\x3e\x29\x11\xb4\x17\xc1\x0f\x22\xe7\x3e\x02\x77\x70\x83\x83\x83\xe1\x7f\x1e\xbe\x0c\xfc\xec\xce\x93\x90\x43\x3b\xfc\x06\xf7\x8a\x02\xd4\x3c\x16\xf0\xda\x91\x5a\x6a\xc0\x64\x33\x5f\xae\xfa\x05\x07\xdd\xca\x6e\x9f\xf4\x58\x45\xae\x12\x79\x4b\x73\xd5\x1c\xce\x39\x33\x99\xe6\x65\x2f\x22\x41\x55\xf3\xc4\x3d\xa7\x7b\xeb\x59\x98\x25\xc
3\xa5\x4d\xe2\x7d\xc0\xb8\x28\x62\xba\x23\x30\x7f\xf1\xe2\x0d\x33\x22\xf4\x58\xed\xdd\x5e\x9c\xde\xec\xbf\x78\x01\x2f\x20\x98\x21\x87\x67\xc2\xe2\xff\x51\x57\xe0\x01\xc5\xff\x9b\x00\x9c\x84\xe1\x8f\x44\xc8\xae\x20\x48\x66\x6e\xec\x32\x80\xa5\xca\x34\xfe\x37\x0a\x20\x62\x6b\xfc\xdf\x41\x00\x91\x48\xb8\x34\x42\xb9\xfb\xf3\x2f\x5e\x5c\x92\xaf\xc1\x0d\xec\x5d\x5e\x4d\xbf\x3b\xfd\xdb\xfe\x8b\x17\x6d\x4c\x21\xb8\x0f\xe0\x5e\xc4\x0a\xf6\x0e\xc6\xff\x78\xb9\x8f\x4f\xce\xf1\x88\x2d\x18\x3d\xf9\x33\x3d\x79\x1b\xc0\x42\xf8\x27\x7f\xa5\x27\x37\x01\x58\xae\xdd\x93\x83\xc3\xfd\x2e\xc0\x97\x01\xa4\xdc\xfa\x21\xaf\x68\xd6\x34\x40\xe5\xe0\x9e\xfc\x85\x9e\xfc\x3d\x80\x3f\xb8\xf5\xa3\x0e\x0f\xe8\xd9\xcf\x01\xac\x95\xed\x48\xd6\xd0\x98\x6f\x68\x4c\x12\x40\x22\xe2\x58\xd0\xb3\x81\xc3\x3b\xc3\x67\xa1\x76\xa4\x0c\x1c\xe6\x32\x00\xc9\xa4\x7f\xf4\xd7\x4e\x3c\xd3\x00\x52\x11\xfa\x21\x07\x87\x34\x6d\x1e\xc0\x9c\x27\x36\x7f\xe8\xb0\x67\x01\x30\x5b\x3c\x73\xf8\xff\xd1\xde\xa9\x3f\x78\x9a\x0f\xf2\x04\xad\x91\xa0\xb0\x78\xe8\x28\xf8\x41\x20\xe3\x67\x02\xf6\x0e\xff\x71\x30\x76\x8c\x17\xc8\xf9\x99\x68\x13\x7e\xf8\x8f\x43\x37\xe4\xad\xc0\xad\x70\xb3\x5e\xba\x47\x37\x02\xf7\xc2\x3d\xfa\xc6\x3d\xba\x14\xc8\x7b\xf7\xe8\x15\x3e\x7a\xf1\x56\xb3\x24\x61\xba\x43\x04\xf0\x8c\x2c\xdc\x5b\x60\xb1\x51\xde\x75\x75\xf6\x9b\xaa\xa1\x52\x72\xd7\xa6\x8a\xa0\x47\x28\x68\x0f\xc2\xf8\xd6\x40\x8d\x76\xbd\x8d\x2f\x33\xae\x3f\x6f\x2e\x1e\x41\xa5\x5c\x33\xab\xf4\x7e\xcd\x15\x34\x7d\xb2\x6d\xa3\xf7\x3c\x61\x22\x76\xaa\x38\x38\x17\x6f\xd6\xa3\x83\x71\xd2\x51\xc2\xdd\x63\x31\x9a\xb6\xc5\x92\x34\x9d\xf7\x3f\x59\x9c\x28\x63\x41\xa2\x31\x75\x8e\x70\x30\x32\x81\xd7\x91\x4e\xb7\x39\xad\xf6\x2d\x22\xba\xd9\x8f\xf5\xe6\x73\x46\xa9\x8a\x94\xbe\x09\xc1\x2c\xfc\x9e\x71\xbd\x76\xc1\x2e\xb5\xa8\x5a\x72\xc3\x22\xae\xe3\x35\x3a\xf6\x61\x96\x64\x31\xa9\xf4\xb6\x3f\xa0\x21\xe2\xb1\xf5\xee\xc2\xfe\x10\x19\x37\x0c\x20\xc9\x62\x2b\xc8\xfd\xca\x3b\x2b\xe9\x9a\xb6\xa1\xb4\x7f\x8d\x69\x6d\x88\x1b\x99\xf8\xf6\xcd\x7a\x18\x39\xf6\xdd\xbf\x5f\x31\x6
b\x3f\x0c\x97\xc1\xb0\xb6\xad\xd4\x84\xd0\x02\x49\xdd\x6e\x14\x46\xc3\x5c\xc5\xb1\x5a\x19\xba\xce\x97\xf7\x1d\xbd\x86\xe3\xfc\x0e\x39\xbc\x87\xde\xb0\x57\xf9\xfd\x01\x9f\x8c\xda\x1e\x7a\x75\xc4\xb7\x95\x5f\xaf\x61\x0f\x7e\x01\xa7\x86\xe0\x57\x40\x85\x08\xff\x84\xde\xd7\x3d\xd8\x87\x5f\x60\x22\xa5\xf2\xfe\xe1\xaf\x2d\x90\xff\xac\xbe\xfe\x27\xf4\x0e\x7a\xf0\x6d\xf5\xd1\x6b\xe8\xbd\xef\xc1\xc5\xe4\x7c\x0a\xbd\x0f\xf8\xee\x42\x59\xee\x44\xb5\x1c\xd5\xe9\x5a\x91\xe1\x67\xe0\xba\x38\x2c\xda\x64\x61\x73\x4e\xa0\xfd\x44\x2c\x83\xc2\x29\x60\xe5\x92\xbe\x8f\xb9\xed\xad\xc4\x4a\xf2\x8a\xaf\x96\x33\xb8\xf4\x45\xd0\x98\x07\x07\x41\x6b\x0f\xdf\x6b\xd7\x7d\xf2\x61\xd4\x16\xd1\xd7\xaf\xe1\x60\x64\xd0\x1d\x7f\xb3\x7e\x6f\x35\x93\x26\x41\xff\x22\xfa\x30\x32\xf8\xee\xcd\x7a\x64\x02\x12\x30\xe4\x01\x99\x6f\x06\x06\xc1\xf9\x46\x0b\xa9\xe4\x60\x16\x33\x79\xdf\xce\xca\x68\x21\x29\xa2\x81\xc9\xf5\xf1\xe9\x69\xa5\x55\x9d\x62\x50\x7f\x35\x1f\x45\x3d\x78\xef\x04\xec\x83\x5b\xe9\x20\x28\xfd\xfa\x36\xc2\xae\x8f\x92\xe9\x35\xfc\x52\x37\x49\xf8\xb8\x34\xae\x5c\x0e\x57\xe2\x5e\xa4\x3c\x12\x8c\x4c\x2b\xfe\x1a\x9d\x54\x67\xdc\xfd\x9e\x31\x69\x85\x5d\xb7\xf5\xb6\x9a\xc3\x41\xd1\x26\x83\x07\x3e\x38\x20\x3e\x50\xe4\x83\xb1\x97\xbf\xdc\x4b\xfe\x26\xdd\x4c\x97\x4a\x16\x8e\xf3\x0c\xcd\x73\xe7\x81\xf0\x4e\x44\x9a\x6a\x95\x6a\xc1\x6c\xb3\x2f\xa5\x27\xf9\x8a\x82\x71\x03\x29\xd7\x68\x7d\x7b\x79\x3d\xa6\x60\x49\x97\x68\xa0\xff\x32\xf2\xc7\xf4\xbd\xe4\xab\x01\xc1\xf8\x80\x8f\xf6\x5c\x64\x51\x8b\xe6\x82\x57\x01\xac\x48\x35\x25\x9c\x49\xe8\xbd\x6a\xfb\xf0\x39\x1e\xfb\x2d\xef\xb2\x57\x8b\x01\x52\xb6\xe0\xf0\x20\xf8\xaa\x82\x31\x81\x6e\x81\xac\xd3\xe0\x70\x1e\x8f\xc7\x39\xda\xf7\x05\x01\xf7\xef\x11\xe8\x1d\x01\x6d\x93\xb0\xc1\x33\xc6\x18\xe9\xd5\xf0\x65\x83\xae\x97\xe3\x71\x17\x86\x4e\x65\x7e\xbd\x55\xce\xea\xc2\x55\x2c\x72\x80\x0e\xa7\x0f\xd7\xdc\x99\xe5\x79\xde\x1a\x16\xe2\xc1\x7d\x79\x20\xe5\x3a\xc4\xa0\xb4\x55\x03\x76\x01\x62\x33\xba\x76\x52\x51\x4a\x95\xef\x98\xd4\x4c\xba\x3a\xee\x78\x38\x2c\x17\xdd\xe4\x65\xf
b\x9d\x7d\x19\x10\xed\x06\x7a\x2f\x73\x34\x3c\xb9\xe8\xdd\x0c\xbf\x46\xf3\x15\xa1\x89\xa0\xce\xe7\x0d\x2c\xf5\x67\x13\x47\x90\x05\xee\x6f\x42\x0e\x31\x73\x19\x54\xd7\xa0\x8f\x71\xb2\xb7\x42\x1d\x62\x3a\x5b\xc3\xc1\x78\x4c\x64\xd4\x42\xd7\x2a\xc3\x7c\x04\x5d\x27\x6a\x3c\x1c\xe7\x74\xb5\xd3\x36\x55\x3a\xeb\x29\x96\x8d\x5d\x24\xbb\x27\x4f\x36\x75\x8e\xec\x5a\x25\xd9\xd6\x2d\xb2\xad\x48\x42\x21\x81\x33\x1c\xa6\x72\x17\xa1\x0f\xcc\xdd\xb9\x17\x72\x31\xa0\x42\x56\xfb\xe4\x52\x60\xfc\xe9\xf5\x11\x17\xad\x77\x04\xf5\x4f\xd6\x47\x3e\xa3\x1d\xa5\xb3\x11\xe5\xb3\xeb\x28\x9d\x6d\x27\x9d\x0d\x27\xdd\xad\x26\x1b\x9a\x4c\xb6\xb6\x97\x34\x1b\x4b\x9a\x49\xf1\xdd\xee\x51\x35\x92\xe1\x9b\xee\xbb\xd1\x1d\xcd\x58\x70\x69\x07\x45\x27\x51\x9e\xb5\xc9\xcb\x25\xba\xb8\x8f\xd6\xc8\xf8\x07\x3d\xaa\x58\xde\xb9\x96\x54\xf4\x00\x7a\x72\x21\xe4\xe3\xc8\x7b\x0b\xa6\x57\xa6\x69\x4b\x68\x4e\x57\xd1\xa7\xc2\xa8\x62\x5d\x03\x79\x50\xbb\x91\x46\xa2\x15\x92\xf0\x92\xcb\xef\x32\x84\x74\x8b\x8d\xdc\x20\xf2\x72\x8b\xd1\x47\x10\x4c\x06\x7f\x47\x34\xd8\xe0\x8f\x46\x3f\x64\x30\x1e\xfc\xb5\xcc\x16\xb8\x0e\x50\x16\x57\x97\x0a\xee\x06\xc3\xfe\x9f\xfe\xe3\xc5\xf3\xe7\xfd\xbd\xfd\xaf\x47\xde\x43\x9d\x2b\xbd\x62\x3a\x1a\x98\x98\x99\x7a\xd4\x5f\x4c\x85\xbd\x60\x14\xec\x43\xc4\xa5\x72\x2a\x31\x4f\x33\x53\xd5\xdf\x65\xd5\x05\x0f\xe9\xd6\x80\xfb\x6e\x4c\xd9\x27\xde\xcc\xd3\xb8\xdb\xc9\x25\x68\xef\x0e\x20\x90\xda\x3d\x96\x92\x9b\xc5\x8d\x2f\xd7\x0a\xdc\xba\x64\xe0\x17\xfa\xa5\x92\xb0\x7f\xca\xc5\xb9\x74\xca\xd0\x0d\x17\x72\xb1\x5f\xcf\x7c\x16\xb9\xc1\xe2\x7b\x3a\x6d\xa4\x58\x9a\x72\xa6\x4d\x7e\x37\x20\xf8\xe5\x7c\x7a\x73\x75\x7a\x7c\x77\x7a\xf2\x6b\x40\xc5\xb6\x1a\x44\xfa\xdc\x56\xae\x53\xdc\x55\x8f\xc9\xe5\x69\xf9\xe9\xa9\x8a\xcb\xda\x5e\x2a\xff\x66\x4a\xb5\x22\x51\x83\x5e\xdc\xa5\x0c\x7a\xc5\x77\xa2\x92\xf5\xc0\xff\xbf\xf8\x4c\x14\x49\xee\xd7\x87\xdf\x55\x64\xf7\x79\xed\x78\xed\xa0\x4a\x3c\xcc\x8f\x3f\xad\x97\xb5\x8f\x2f\xb5\x0e\xec\xc6\xe2\x94\x5f\xb0\x92\x76\x2f\x2e\xbb\x74\xb7\xd5\xec\xa8\x11\xf3\x8f\x54\x51\x7a\x3
b\xac\x9f\xfb\x41\x81\xca\x11\x1c\xc7\x2a\x8b\xf2\x9f\x09\x93\x6c\xc1\xf5\xa8\x4d\x8c\xbf\x9c\x73\xd4\x2e\xff\xb8\xaf\x6c\x35\xd6\x77\xd7\x37\xbe\xfc\xc5\xf6\xdb\x02\x6e\x65\xcc\x27\x5e\x6c\x8f\x99\xb1\x1e\xd1\xed\x77\xdb\xdb\x6c\xf8\xd7\xdc\x6d\x27\x1b\x5a\x5c\x9c\xfd\x78\x11\xfc\xb1\x36\x7f\xa3\xe9\xa8\x5e\xbc\x6d\xdf\x55\x12\xa6\xf1\xb9\xad\xfc\x4b\x5a\x1b\xef\x55\x75\xdc\xc8\x2a\x6f\xdf\xd6\x4b\x28\x3e\xcb\xdc\xbc\x8d\x3b\x84\x9b\x55\xdd\x88\xe4\x9f\xbb\xf3\x2d\x3a\x45\x12\x16\x99\xec\xfc\xc1\xf2\xfa\xe8\x11\x04\xd3\xbf\xdd\x5c\x4d\x8e\x6f\xf6\x68\x73\xf6\x5d\x04\x71\x35\x7d\x3b\xfd\xdb\xe5\x5d\xed\x55\xdd\xa2\x68\xbe\xe0\x8f\xfb\xde\x4c\x30\xbd\xa0\x6b\x2c\xb8\xe0\x11\x1c\x0c\x73\x79\xbf\x59\xd6\xcf\x6b\xf3\x0a\x31\xd2\x52\x83\x5a\x16\xcf\x8a\x74\xbd\x2b\x1e\x15\x97\x6c\x87\x70\x38\x74\x8b\x1f\xc1\x04\xff\x43\x29\xf6\xca\x7d\x2c\x62\x79\xbd\xf0\xbc\xe4\xf9\x17\x5c\xaf\xa6\x87\xf9\x95\xe0\xc2\x0a\x2c\x84\x5d\x66\x33\xaa\x3f\xba\x72\xe4\x48\xf3\x43\x67\x0a\xae\x69\xe8\x3e\x75\x24\xd4\x40\x32\xc0\x55\x62\x0e\x21\x4b\x6d\x46\xc9\x7e\x95\xa5\xdd\x37\xa7\x2b\xdf\x11\x75\x97\xea\xab\x2c\x68\x5f\x35\xac\x55\x90\x8a\x0b\x2e\xb5\x5b\xc4\xc5\x6d\xdc\x19\x9f\x2b\x17\x0e\xbb\x94\x97\xab\xe8\x2d\xf8\x63\x1d\xec\x69\xee\x04\x93\xbf\x82\x10\x1c\x3e\x6b\x0c\x4f\x70\xb8\x0b\x40\x8a\x6f\x10\xe4\xee\x06\x15\x8a\xe2\x35\x60\x64\xee\x09\xad\xc1\x25\xa2\x87\xb9\x7a\xce\x62\xdb\xb8\x67\x8d\xdb\x91\xdb\x27\x4f\x00\xf9\xbb\x44\x82\xfb\x56\x88\x2b\xc7\xd7\x79\xeb\x8c\x66\xf5\xc8\xe4\x09\xc0\xbc\xe9\xc9\xcf\xf5\x85\x49\x38\x9d\x77\xdf\x8d\xae\x5b\x59\xf7\xb6\x72\x45\xda\xaa\x1c\x50\x79\x59\x9a\x0a\x74\x86\x57\xc3\x4a\xe4\x88\x5b\xb8\x11\x91\xf9\x40\xae\x8a\x68\xe5\xd3\x05\xcd\x33\xf4\x9b\x51\xf2\x92\xad\x63\xc5\xa2\xa1\x37\xb2\x7d\xe8\x0d\x5f\xe4\x39\x94\xd7\x7b\xd1\x9f\xf6\x87\x2f\x7a\xfb\xc1\xf3\x67\xff\x13\x00\x00\xff\xff\x6d\x5e\xee\xfa\x81\x58\x00\x00"), }, "/logging/log_view.yaml": &vfsgen۰CompressedFileInfo{ name: "log_view.yaml", @@ -566,9 +566,9 @@ var Assets = func() 
http.FileSystem { "/monitoring/dashboard.yaml": &vfsgen۰CompressedFileInfo{ name: "dashboard.yaml", modTime: time.Time{}, - uncompressedSize: 656064, + uncompressedSize: 656060, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x5d\x73\xdc\xb8\xb1\xf0\x7f\xbf\x9f\xa2\xeb\xe4\x5f\xe5\xa4\x4a\x92\xbd\xd9\x6c\x52\xf1\x73\x35\xb6\xc6\x6b\xd5\x4a\x1a\x3d\xd2\x38\xde\xad\x3d\xa7\x2c\x68\x06\x9a\xe1\x63\x0e\x39\x21\x39\x92\x75\x52\xf9\xee\xff\x02\xc0\x77\x82\x24\xba\x01\xca\x4e\x02\xde\xac\xd7\x1e\x34\x41\xbc\xa3\xbb\x7f\xdd\xbf\x83\xb7\xf1\xfe\x29\x09\x36\xdb\x0c\xfe\xf8\xea\x8f\x7f\x84\x9f\xe2\x78\x13\x72\x38\x3f\x7f\x7b\x02\xb3\x30\x84\x6b\xf1\x4f\x29\x5c\xf3\x94\x27\x0f\x7c\x7d\xf2\xdd\xef\xe0\xbb\xdf\xc1\x79\xb0\xe2\x51\xca\xd7\x70\x88\xd6\x3c\x81\x6c\xcb\x61\xb6\x67\xab\x2d\x2f\xfe\xe5\x08\xfe\xc6\x93\x34\x88\x23\xf8\xe3\xc9\x2b\xf8\xbd\xf8\xc1\x7f\xe5\xff\xf4\x5f\x7f\xf8\x3f\xdf\xfd\x0e\x9e\xe2\x03\xec\xd8\x13\x44\x71\x06\x87\x94\x43\xb6\x0d\x52\xb8\x0f\x42\x0e\xfc\xcb\x8a\xef\x33\x08\x22\x58\xc5\xbb\x7d\x18\xb0\x68\xc5\xe1\x31\xc8\xb6\xf2\x35\xb9\x10\x51\x8f\x5f\x73\x11\xf1\x5d\xc6\x82\x08\x18\xac\xe2\xfd\x13\xc4\xf7\xf5\xdf\x01\xcb\x54\x8d\xc5\xb3\xcd\xb2\xfd\xeb\x97\x2f\x1f\x1f\x1f\x4f\x98\xac\xed\x49\x9c\x6c\x5e\x86\xea\x97\xe9\xcb\xf3\xb3\xb7\xf3\xcb\x9b\xf9\xf1\x1f\x4f\x5e\xa9\x32\x1f\xa2\x90\xa7\x29\x24\xfc\xef\x87\x20\xe1\x6b\xb8\x7b\x02\xb6\xdf\x87\xc1\x8a\xdd\x85\x1c\x42\xf6\x08\x71\x02\x6c\x93\x70\xbe\x86\x2c\x16\x35\x7e\x4c\x82\x2c\x88\x36\x47\x90\xc6\xf7\xd9\x23\x4b\xf8\x77\xbf\x83\x75\x90\x66\x49\x70\x77\xc8\x1a\xcd\x55\xd4\x2f\x48\x1b\x3f\x88\x23\x60\x11\xfc\xd7\xec\x06\xce\x6e\xfe\x0b\xde\xcc\x6e\xce\x6e\x8e\xbe\xfb\x1d\x7c\x3c\x5b\xbe\x5f\x7c\x58\xc2\xc7\xd9\xf5\xf5\xec\x72\x79\x36\xbf\x81\xc5\x35\xbc\x5d\x5c\x9e\x9e\x2d\xcf\x16\x97\x37\xb0\x78\x07\xb3\xcb\x5f\xe1\xe7\xb3\xcb\xd3\x23\xe0\x41\xb6\xe5\x09\xf0\x2f\xfb\x44\xd4\x3f\x4e\x20\x10\x0d\xa9\x7a\xef\x86\xf3\x46\x05\xee\x63\x55\xa1\x74\xcf\x57\xc1\x7d\xb0\x82\x90\x45\
x9b\x03\xdb\x70\xd8\xc4\x0f\x3c\x89\x82\x68\x03\x7b\x9e\xec\x82\x54\x74\x67\x0a\x2c\x5a\x7f\xf7\x3b\x08\x83\x5d\x90\xb1\x4c\xfe\x4d\xe7\xa3\x4e\xbe\x0b\xa2\xfb\xf8\xf5\x77\x00\x59\x90\x85\xfc\x35\x5c\xc4\x51\x90\xc5\x49\x10\x6d\x5e\x9e\xb2\x74\x7b\x17\xb3\x64\xfd\x1d\xc0\x9a\xa7\xab\x24\xd8\x0b\x29\xaf\x61\xb9\xe5\xb5\xdf\x41\xf9\x3b\x48\x78\x1a\x1f\x92\x15\xff\x0e\xe0\xcb\xf1\x7a\x15\x1e\xa7\x59\x72\x58\x65\xc7\x11\xdb\xf1\xd7\x50\x97\xa7\xfe\x79\xcb\xd2\xe3\x80\xed\x5e\xc3\x3d\x0b\x53\xfe\xdd\x9e\x65\xdb\x54\xd4\x65\xc3\x33\xf1\x1f\xcd\x6b\xef\x0f\xd1\x4a\xfc\x9f\x18\x88\xb2\x27\x37\x5c\x8c\xbf\xfb\x38\xd9\xc9\x4f\x04\x76\x17\x1f\x32\x60\x8d\x97\x01\xec\x59\xc2\x76\x3c\xe3\x49\xaa\xe4\x1e\x43\xb7\x4a\xe2\x29\xc6\xcf\x6b\xc8\x92\x03\xcf\xff\xb2\x51\x89\x19\xdc\x1f\xc2\x10\x82\x28\xcd\xe4\x68\x8f\xef\x5b\x2f\x13\xc3\xee\xc9\xb4\xf6\xf2\xc7\xdf\x56\xfd\xd7\x3c\xe4\x19\x37\xfd\x00\xf5\xeb\xaf\x5f\xdf\x59\x18\x62\xab\x1c\x86\x86\x95\xde\x27\xf1\xff\xe3\xab\x6c\xa8\xca\xe9\x6a\xcb\x77\xec\x75\xfe\x7f\x00\xd9\xd3\x9e\xbf\x06\xb1\x56\x44\x9b\xef\x00\xc2\x20\x35\x1e\xd0\xe2\xb7\x9a\x11\xb1\x63\xd1\xd3\x33\xd5\x57\xac\xe5\x71\xc4\xa3\x4c\xca\x55\x3f\xcd\x5f\x51\x56\xa0\x28\x9a\xaf\x1a\xed\x7e\x55\xd3\x3b\x58\x97\x95\x49\x5f\xfe\xe3\x1f\xf9\x1f\xff\xf9\xcf\x97\xeb\xe2\xe7\xe2\xaf\x45\xa5\xff\xf9\xcf\x46\xc1\x3d\x4b\x78\x94\x1d\xaf\xe2\x48\x6c\x17\x3c\x69\x7f\x53\xb5\x7a\xac\x12\xce\x32\xde\xf8\x36\xed\xd2\x52\xff\xa7\x84\xb3\xf5\x71\x16\xec\x78\x7c\xc8\x5e\xc3\xab\xc6\xbf\xc9\x09\xd9\xf7\x8f\x6a\xe4\x74\xff\x55\xb5\x5e\x7c\xa7\x6b\xf5\xfc\xff\x55\xdf\x94\xff\xb3\x0e\xd2\x7d\xc8\x9e\x2e\xeb\x7f\xd7\xfc\xc4\x7d\x12\xef\x79\x92\x05\x3c\xad\x7a\x69\x15\x87\x87\x5d\x74\xce\x9e\xc4\xdb\xcb\xbf\xd5\xbe\xbf\xaa\xf4\x26\xce\x97\xdf\xb7\xb5\xd2\xba\x5f\x29\x29\x65\x4f\xf6\xfc\xbc\x33\x80\x45\x27\xf1\x28\x53\xbb\xe3\x43\xb0\xe6\x6b\x08\xa2\x2c\x06\xfe\xf7\x03\x0b\xc3\x27\x48\xf7\x6c\xc5\xd7\x79\xdd\xe5\x9e\x24\xf6\x9f\x9a\x44\x80\xc7\x60\xbd\xe1\x59\x0a\x2c\xe1\xc0\x92\x84\x45\x1b\xbe\
x86\x07\xf1\xf5\x2b\x21\xe2\xa4\x53\xdb\x55\x1c\xdd\x87\xc1\x2a\x4b\xeb\xad\x70\x0c\x9b\x24\x58\x77\x2a\x7c\x0c\xbb\x38\x65\xc1\x4a\xf3\x0f\x49\xfc\xd8\xf9\x5b\x5d\xbb\x57\x6d\xdf\xfa\xcb\xa2\xe9\x45\xa5\x9f\x5a\xff\xa2\x6d\xfd\xb4\xf5\x23\x4d\x6b\xaa\x66\x8a\xef\xcb\x86\x15\x4b\x96\x1a\x2e\x27\xda\x57\xa4\x3c\x5a\x1f\xf3\xdd\x3e\x7b\x6a\x4c\x83\xe6\x8f\xc4\xaa\x92\xf7\xb0\xf8\x63\xeb\x37\x41\xc6\x77\x9d\x4f\xeb\x1d\x57\xad\xef\x1b\x18\x37\xfa\x8f\xee\x6f\x64\xf5\x3c\x72\x71\x90\xd5\xfd\x4b\x51\xa5\x20\xca\xf8\x86\x27\xda\x5f\xa8\x85\x53\xfe\xe6\xcf\x7f\xd2\xfe\xa2\xd5\x33\x1f\xe5\xeb\xb4\xbf\xec\x74\x4f\xc2\x43\x96\x05\x0f\x3c\xaf\xa3\x3a\xc1\x06\x69\xde\x6d\x27\xb5\x2e\xcc\x7f\xa1\x15\x0b\x62\xb2\x94\x47\x80\xf5\xff\x3b\xa4\x99\x3c\x93\x3d\x06\xeb\x6c\xab\xba\x3e\x1f\x05\x91\x3a\xef\xad\x12\xce\x23\xf8\x7d\xf1\xf6\x1e\xa1\x59\x0c\x7b\xce\x93\xf4\x0f\x27\xf0\x93\x5c\x16\xd5\x49\x4f\x55\xe4\x08\x36\xf5\xbf\x2b\xde\x94\x95\x15\xee\x11\xda\xa8\xc2\x09\x9c\xdd\x43\xbc\x0b\xb2\x8c\xaf\x8f\x80\xc1\x03\x0b\x0f\x72\x4b\xfe\xbe\xfc\xa2\xc7\xad\xb8\x18\x24\x5c\x1c\x34\x83\x68\xd3\x1e\xb2\x50\x4d\xf7\xa1\x0e\xd6\x4d\x28\x7d\xe7\x29\x59\x66\xbd\x97\x4f\xa3\xda\x7a\xd3\x59\x6b\xc4\xcd\xa0\xde\xa3\x03\x75\x18\x99\x77\xf5\x9f\x0e\xcf\x3e\xf5\xf4\xcc\xc1\x7a\xab\xf4\xcc\xc4\x56\xc3\x8c\xcf\xc7\xa1\x46\x1b\x9b\x9c\xea\xb9\x0b\x59\xf4\xb9\xff\x9f\x8d\x2a\x0c\xdd\xde\x7c\x23\xc4\x9a\xfc\xdc\xf8\x1b\xc7\x24\xb6\xce\x9a\xf2\xbb\xd4\x96\xa5\xef\xfa\x7a\x3d\xb4\xbb\x50\xfb\x39\x86\x2f\x4f\x6f\xb7\x2c\x19\x6a\x85\x63\x48\x57\x71\xc2\x57\xd5\xf9\x49\xff\xab\x8c\x7f\x19\x16\x13\xc6\x9b\xf4\x8a\x45\x3c\xec\xfd\x55\xf9\x0b\xf7\x9d\x77\x3e\xfa\x72\x4a\x07\x9a\x4b\xfd\xf6\xba\xe3\x6e\x64\xf0\x99\xcc\x34\xf1\xdc\x07\x61\xc6\x93\xe1\xdf\x74\xee\x1c\x43\x4f\xab\xe7\xde\x49\xf9\x23\x65\xda\xb7\x32\x59\x06\xb2\x2d\xcb\x60\xb5\x8d\xe3\x94\xa7\x62\xed\x5f\x6d\xc5\x10\x03\x1e\x65\x49\xc0\xfb\x56\x98\x5a\x9d\x63\x48\x78\x76\x48\xa2\x13\xa9\xec\xf8\x6d\xb6\x7e\x10\xb7\xbc\xb5\x1c\x4c\xf0\x7f\x0f\x5c\x48\
xf9\x9f\xdf\x6f\xb3\x6c\x9f\xbe\x7e\xf9\x72\x15\xc6\x87\xf5\xc9\x46\xea\xbd\x4e\x56\xf1\xee\x65\x18\x6f\x36\x41\xb4\x79\xb9\x8e\x57\xe9\xcb\x87\x80\x3f\xbe\x64\xb9\x84\xe3\xbf\xab\xc2\x7f\x18\x9a\xc7\xea\x59\x44\xe1\x53\xbd\xd6\xea\xa3\x76\x2c\x5b\x29\x3d\x56\xfe\xa9\xe2\x74\xaa\x2a\xcb\xd7\xe3\x42\x67\x11\xc8\xbd\xa1\x28\x2d\xc5\xf1\x54\x5e\x3b\x6b\xef\x1a\x16\x54\xe8\x53\xc4\x3d\x61\x64\x8c\x8c\xef\x9c\xd5\xd3\xea\xfe\xeb\xfa\x5b\x30\xa3\x40\xec\xa9\x42\x84\x3c\xae\xe6\x5d\x51\xd6\x39\x15\x9d\xbb\x8a\xc3\x70\x78\x11\x51\x8f\x58\x96\xc4\xc9\xed\x04\xde\x1e\x12\x71\xf7\x0b\x9f\x20\x16\xdd\x52\x5c\x1f\x65\xeb\xa7\x87\xfd\x3e\x4e\x32\xbe\x16\xe7\x90\x51\x99\xb2\xf9\x8f\x8a\xe3\xce\x86\x67\xf0\x18\x84\x21\xac\xf9\x3d\x3b\x84\xf2\x58\x2d\xfe\x69\x1b\xa7\x59\xf1\x96\xb1\x5e\x45\xec\xfb\xed\x42\x26\x27\x80\xea\x19\x3c\x0b\x54\x0f\x62\xc6\x77\x96\x5e\x54\xa9\x84\xdf\xf3\x84\x47\xab\xf1\x41\x28\xaf\x55\xf9\x08\x78\x0d\x6f\xc5\x7c\x2d\xfe\x77\xc7\x22\xb6\xe1\xc9\xcb\xab\xc6\x3d\x77\xe8\xb9\x0f\x78\xb8\x7e\x5d\xbf\x39\x77\x9f\x72\xa9\x76\xbf\xa1\xdd\x18\xec\x02\xf8\x0d\xcd\x44\x6a\x6b\xad\x2d\xbf\x11\xd2\xc3\x6e\xc7\x92\xe0\x7f\xc5\x44\xcb\x82\x1d\x87\x54\x2e\x72\xb0\x66\x19\x7b\xd6\x03\x8b\xf5\xde\x67\x72\x58\xe9\xaa\x51\x7a\x2a\x13\xec\xf8\x8d\x6c\x08\xb1\x63\x0c\xad\x7e\xa6\x3b\xee\x86\x1d\x36\xfc\x6f\x01\x7f\x34\x5b\x74\x47\x47\x16\x74\x47\xd7\x4f\xc5\x2b\x4c\x8b\xe1\x47\x98\xe9\x2b\x1a\xc3\xed\xa3\x58\x24\x57\x4c\x19\x7a\x78\x6d\xec\x65\x31\xa4\xdb\xf8\x11\x98\x6a\x9d\xd1\xf9\xbb\x12\x83\x68\x6c\x49\x35\xed\x10\x90\x9b\xc4\x23\x4f\xde\xc4\x87\x68\x70\x38\x14\x8f\x6a\xae\xe8\xb0\xbb\x1b\x3d\xda\x40\x4d\x6d\xb0\x8e\x0f\x77\xe1\xf8\xc7\x69\x4e\xbf\x45\xe5\x0c\xca\x76\x76\x51\xf9\x69\x70\x27\x8a\xe7\x56\x9e\x20\x55\xcd\x6c\xd6\x8c\xea\x11\x92\xca\x2b\xb9\xbc\xd9\x8b\xb2\xa2\xd7\x0e\xe1\x1a\x58\xf8\xc8\x9e\x52\xb8\xe3\x85\x26\xc0\x48\x66\xb6\x65\x11\xc4\x89\x52\xe7\xa9\x4d\x33\x18\x39\xb5\x88\xe7\xb0\xdf\x7f\xbb\x7d\xf5\xa1\xac\x1c\xa5\xaf\xe4\xa7\x3d\x5b\x5f\x49\xb3\xa6\xe8\
x04\x23\xa9\xb8\x8e\x4a\xf7\x2c\xf9\x2c\xd7\xfa\x69\x97\xba\x9b\xc6\x7b\xa6\x5b\xef\x50\xef\xc1\x2e\x7a\xb2\xb1\xdc\x2c\x7a\x26\xbb\x1a\xa8\x8b\x68\xf9\x45\xcb\xa7\xfd\xd8\x48\xc7\xac\xa5\xbb\x20\x9a\x85\xc1\x26\xda\xf1\x28\xbb\xe2\x49\x10\x23\xe6\xa9\xe1\xd1\xb1\x33\x0c\x2e\x3a\xef\xb4\x5d\x2a\xe3\x48\x9e\x7c\x60\x1f\x07\x51\x06\xf7\xa2\x5d\x79\xb4\x1a\xbb\xfe\xa8\x27\x88\x6a\xd3\x2e\xd8\xed\x43\x2e\xea\xa5\x5c\x07\x94\x81\xfd\x49\x9e\xb2\xb6\x5c\x34\x56\xb0\x3b\xec\x8c\xc4\xb2\xe2\x0b\x61\x2f\x3f\x51\x8c\x20\x31\xac\xa4\xd7\x43\xfd\xc8\x26\xee\xa7\x1d\x8d\xbe\xfe\x79\x27\xa6\xf5\x17\x26\xea\x78\x04\x81\x5a\x2e\xe4\x77\x07\x29\xec\x0f\x77\x61\x90\x6e\xa5\x3f\xc2\x8a\x03\x7f\x18\x3e\x00\x55\xcf\xf7\xaf\xc4\x77\x1d\x32\x9e\x42\x90\xc1\xa3\x5c\x76\xa2\x58\xdc\x7b\x3f\x8b\x3a\x46\x62\x2a\xc4\x70\xcf\xc5\x2d\x98\x19\xad\x94\xf9\xc7\xab\xaa\xb1\x0c\xe2\x88\xe7\xaf\x90\x0a\xfb\xe4\x81\x85\xe9\x09\x2c\x95\xf7\x08\x0f\xcd\x44\x06\x29\xc4\xb2\xf7\x59\x28\x6d\x47\xfc\x4b\x90\x66\xa9\xba\x1c\xb2\x14\x18\x6c\x83\xc8\x60\xc9\x6d\x4e\xa4\xe9\xc7\xfa\x0d\x66\xe2\x82\xcb\x65\xaf\xf9\xe6\x79\x64\x34\x6e\x1b\xb3\xec\xc5\x75\xbe\x3e\x29\x7b\x86\xa8\x90\xd8\xa4\x64\x13\xaa\xe9\x92\xc5\x46\x7d\x27\x57\xce\x52\xa1\x5e\x54\xf0\x04\xae\xe2\x34\x0d\xee\xc2\x7c\x03\x4c\x5f\xc3\xcd\xd5\xec\xfa\xe7\x4f\x6f\xdf\xcf\xae\x97\x9f\x96\xbf\x5e\xcd\x3f\x7d\xb8\xbc\xb9\x9a\xbf\x3d\x7b\x77\x36\x3f\x3d\x32\x7a\x95\x92\x70\x7e\x76\x39\x3f\xca\xff\xfc\x66\x76\xfd\xc2\xa0\x28\x8f\x0e\x3b\x93\xe1\x70\x3c\x58\x49\x84\x00\x51\x47\xc4\xcf\xdf\xcc\xae\x07\x7f\x9d\x6d\x13\x9e\x6e\xe3\x70\x3d\x9d\x7e\x68\x59\xbe\x02\xb3\xa5\xbe\x90\x63\xa7\x2c\x5a\x73\xbf\xc8\x78\xb2\x0b\xa2\x7c\xa7\xcd\x58\x36\x3e\x3d\xf2\x23\x52\xb5\x2d\x6f\x82\x07\xae\xd6\xef\xda\x92\xfa\xe2\x05\xac\x94\xf6\x48\x0d\xac\xf1\x55\x41\x2c\xac\x2c\x02\xb6\xca\xc4\x99\x49\x1d\xc7\xbe\x1c\xb5\xde\x15\xa4\x6a\xf1\x5e\xb3\xa8\xcf\xe6\x58\x7f\xe4\x17\x89\x55\xfa\x8b\x28\x59\x1e\xdf\x1a\x47\xb3\x42\x58\xd5\x3e\xe3\xba\x9e\x2d\xcb\x20\x4b\x82\xcd\x86\x27\
xe2\x60\x18\xc6\x8f\x47\x42\x66\x65\xd5\x6b\xbf\x63\x54\x64\xbb\x0e\xad\x77\xb0\xbb\xf8\x81\x9f\xc0\x4d\xb0\x0b\x42\x96\x84\x4f\x62\xeb\x19\xd7\x17\x89\x8f\x96\x25\x5f\xca\x3a\x02\x83\x47\xa6\x3c\xd3\x7a\xde\x33\x2a\xb2\x26\x4d\x76\x4d\xd4\xd3\x3f\xc5\x7b\xcc\x86\xd4\x31\x1c\x94\xcb\xe0\x17\x60\x61\x1a\xc3\xfe\x90\xc9\x8d\xb0\xd6\xd5\x4a\xd2\x09\xfc\xfe\xd4\xac\xe7\xb3\xe4\xb0\xdb\xa7\x45\x3d\x4e\xfe\x00\x30\x4b\xc5\xf0\x2a\xb7\xee\x55\x1c\xa5\xc1\x9a\x27\xe2\x34\x69\xa0\x05\x52\x4f\xe9\x4e\x79\x1f\x87\x61\xfc\x28\xbe\xf0\x3e\x3e\x24\xf5\x89\x0f\xff\x00\x50\xa3\xf7\x35\xfc\xf5\xd5\x91\x38\x86\xb2\x8c\x6f\xe2\xe4\x69\x7c\x65\x7b\xf1\xe2\x74\x76\xf9\xd3\xfc\xfa\xc5\x8b\x23\xf9\x09\xb2\x53\x5e\xc3\x8b\x17\xb3\x37\x8b\xbf\xcd\xc5\xdf\xfe\xf3\xa8\xf1\x82\xbf\xa0\x5f\xf0\x71\x76\x7d\x79\x76\xf9\xd3\xd0\x1b\xea\x2f\xf8\xde\xd5\x17\xbc\x99\x9f\x2f\x3e\x76\xe5\xff\xd1\xd9\x07\x54\x2f\x90\xf7\xb9\xe8\x75\xbe\xa1\x8d\x6b\x32\xb4\xab\xc2\xf7\xaf\xf2\x63\xd8\xfe\x90\xb5\xc7\xf8\xf8\xf5\x8f\x81\x6a\x06\x35\x6a\x8f\xf2\xaa\x34\xd7\x87\xef\x5f\xc1\xdd\x21\x43\xdc\x29\xeb\xd5\xfb\xe3\x2b\x60\x90\x37\x45\xeb\x25\xe2\x94\xb4\xca\xc2\x27\xb8\xe3\xd9\x23\xe7\xe3\x72\x85\xac\x68\x0d\x7f\x11\xff\x81\xc5\xcf\x43\x75\x2e\xaa\x60\x62\x4b\xfa\x4b\xeb\xfb\xe0\xaf\x9a\x3a\x8b\x17\x1b\xf6\x53\xef\xe2\x2a\xe5\xd6\xdb\xfb\x64\xec\xac\xf1\x6d\xda\x0e\x8c\x6e\xf3\x56\x87\x53\xe3\xc3\x03\xee\xfe\x2a\x1d\xb2\xe2\x51\x9b\x68\xfd\x5b\x8d\x8f\xf2\x3a\xdf\xad\xd8\x4c\x5b\xe6\xa4\xa1\xe4\xeb\x0c\x0f\xee\xba\x43\x97\x3a\x81\xc8\xf6\xa9\x14\x54\xe5\x76\x61\x76\xe5\x04\xf5\xd1\x62\x73\x15\x97\x42\x26\xb6\x1e\xe9\xd9\x07\x0c\x7e\x51\xf6\x81\xea\x1c\x6f\x28\xb1\x38\xed\xbf\x5d\x9c\x2f\xae\x1b\x47\x7c\xf8\xe9\x7a\xfe\xeb\x11\xbc\x39\xff\x30\x97\x7f\x9e\x5f\x9a\x9d\xfa\x01\x7e\x9d\x9f\x9f\x2f\x3e\x1e\xc1\xe2\x5a\x4c\xc6\x23\xb8\x9e\x9f\x9a\x9c\xfa\xcd\xcf\xfd\xe2\xa4\xd0\xa9\xb0\x61\x39\xf1\x55\x86\x3f\x15\x9f\x6e\x2e\x75\x7e\x69\xf8\x5b\xd5\x3c\x86\x3f\x56\x6d\x68\xf8\xe3\x6b\xa3\x56\x58\x07\x09\x97\xee\xcd\xcf\
x33\x51\x4f\x8b\xd7\x3d\xdf\x64\x2d\x5f\x69\x31\x61\xcb\x56\x2a\x09\x8f\xe2\x1e\x83\x9e\xb5\x65\x75\x26\x99\xb9\xa7\x67\xd7\xf3\xb7\xcb\xb3\xc5\x65\x73\xf6\xca\x83\xdc\x11\xa8\xe3\x90\xf3\xd9\xa7\x7d\xa9\x61\x59\x59\x33\xd3\x29\x38\x37\x9b\x2a\x21\xbb\x1b\x76\x9f\xaa\x1e\xdb\xf1\x7c\x2e\x5e\x85\x1f\x54\x33\x55\xc7\x72\x34\xa1\x46\x91\x3a\x25\x23\x3e\xcf\xd8\x6c\x43\x30\xdc\x74\x5a\xe4\x6f\xa2\x76\xf8\x16\xe9\x58\x5b\xaa\x26\xa9\xfe\xcd\x70\x32\xe4\x26\x9a\x3b\x0e\x6b\x7e\x1f\x44\x6a\x6e\x65\xd2\x15\x46\xba\x06\xa7\x2b\x16\x16\xef\x31\x14\xb9\xe3\xe2\x00\x3d\xdc\x3b\x2d\x23\xf7\x74\xd6\x9a\xa5\xb1\x35\x1d\x2c\x17\x51\xd4\x8b\x1a\xfd\x59\x69\x28\xdf\x05\x3c\x5c\x4b\xdf\x21\xa5\x51\x6f\x79\x46\x18\x28\x41\x32\x06\xf7\x49\xbc\x93\x5d\x78\x93\xb1\xd5\xe7\x75\x12\x3c\xf0\x24\xef\x94\x14\x66\x57\x67\x2e\xad\xd8\x55\x3f\xbe\x33\x72\xeb\x03\xec\x61\x7d\xa0\x43\x8d\x1c\xfd\xc0\x69\xaf\x12\x5e\xde\xe8\x69\x55\xaa\xc6\x3a\x55\x56\x89\x5a\x37\x9b\x2c\x6c\xa6\x56\x37\x90\xbb\xc1\xbd\x69\x6d\x71\xb7\x16\x00\xb6\xd9\x24\x7c\xc3\xcc\x0f\x45\xe8\xee\x87\xee\x10\x98\x55\x2f\xc5\x0a\x70\x3e\x02\xf0\x75\x69\x0c\x88\x37\x4f\x85\x0b\x9f\x52\xd4\x26\xec\x11\x35\xe1\x4b\xa1\xb9\x01\xad\x74\xe9\x84\x0f\x15\xdb\xcc\xc3\xb5\x72\x5f\xdc\xdd\x05\x91\xe9\x2a\x0e\xb0\x3b\x84\x59\xb0\x0f\x1b\x5a\x69\xb9\x34\xad\x83\x7b\xe9\x41\x97\xc1\x43\xc0\x1f\x53\x88\xc7\x35\xa9\xc5\x53\x18\xfb\x4c\x4f\x80\xd8\xf1\x08\x75\xa3\xa5\xb9\x29\xb8\xac\x1e\xf6\x88\xa3\x9e\xf6\xf8\x44\xdb\x85\x8b\xa7\xb3\xcd\xdf\x96\x1f\xf3\x49\x59\x60\x6f\x0b\x5e\x1a\x31\x38\x40\x2a\xd4\x64\x2f\x16\xa6\xcb\x23\xb1\xcd\xa7\x7c\x15\x47\xeb\xf4\x48\xa9\xb0\x73\xe6\x05\x25\x55\xd2\x5c\x0f\xc1\x9a\xd7\xac\xb8\x91\xf4\x0a\x16\xff\xff\x5b\x6d\xe0\xfc\xcf\x6f\xb9\xa3\xf3\xae\x44\xad\x4f\x1e\x7e\x38\xa9\xa6\xd3\xff\xa0\x5e\x2c\xa1\x3c\xa9\x81\x4e\x25\x56\x76\x17\xc6\xab\xcf\xd2\x73\x57\xbc\x33\xb7\xcb\x3e\x06\xa1\xd9\x79\xb3\x78\xc4\x29\x28\x8e\x38\xdc\xf1\xfb\x38\x51\xdf\xb4\xe7\xc9\x71\x3e\xf4\x65\x57\x18\x9e\x0d\x8b\x67\xc5\x22\x21\x55\xd2\
xfb\xca\x62\x54\xce\x00\x40\x9f\xd5\xd4\xb3\x3b\xa4\x99\x94\x99\x41\xc8\x59\x9a\xc1\x9f\x5f\x15\x3d\x29\x89\x26\x56\xab\x33\x6e\x88\xa8\xef\x83\x58\x42\xfc\x52\x4b\x78\x3b\x3b\x3f\xfb\xe9\xf2\xd3\xe5\xe2\x72\x7e\x2b\x06\x48\x31\xf6\xd6\xa6\x1a\x0d\xf5\xd4\xd6\xa1\xa0\x16\xd6\x40\xd9\xab\x78\x92\x48\xd5\x0c\x4a\x62\xb5\xce\x9d\xdd\x43\x14\x6b\xba\x09\x2b\xb1\xfa\x34\xc8\x2f\x1a\x85\xa0\x7a\x23\xe0\x06\x69\xbd\xc1\x0a\x63\x4f\xbd\x25\x82\x4d\x14\x27\x26\xfe\xf7\xc5\xb3\x4a\xe2\x34\x55\xf3\xe5\x9a\xaf\x0f\x2b\xb3\x03\x57\xf1\xb8\x59\xdd\xde\x76\xea\x40\x91\x35\xe5\x46\xdc\xad\xa1\xb1\x3a\x43\x3d\x5d\xa5\x46\x22\xe4\x48\x4d\x84\xd8\x8f\x14\xca\x6e\xe8\x37\x50\x3c\x77\xbc\xb4\x1a\xe7\x3b\x71\x63\x63\x95\x2b\xda\xb8\x99\xb3\xfe\xa4\x41\xb4\x69\x6e\xcf\x47\xf0\xb8\xe5\xf9\xca\x55\xdc\x10\x51\x22\x39\x5b\x6d\xeb\x5e\x48\xf9\x65\x30\xe1\xa9\x38\x0c\x44\x1b\xec\xb9\x04\xd4\x34\x60\x55\x64\x80\xf8\xbe\xdc\x1f\x58\x98\x70\xb6\x7e\xca\x27\x1a\x6e\xe7\xc9\x8d\x2b\x79\xfd\x82\x48\x5a\x99\x6a\x47\x68\x80\xcb\xd8\xf4\x78\xa9\x1e\x51\xab\x44\x0d\x97\xaa\x93\xd3\xd6\x0a\x8e\xdd\x1d\xe5\xa7\xd6\xab\x95\xaf\xf9\xc1\x1a\x56\xdb\x38\x58\x21\x9b\x72\xcd\xf7\x5c\x79\x8a\x89\xaf\xbe\x55\x97\xba\x4f\x9f\x83\x68\x7d\x5b\xb0\xe8\x70\x2b\x9b\xe6\x93\x98\x60\xb8\xe5\x2a\x57\x26\xc4\x49\xb0\x09\x22\xd6\xaa\xf7\x75\x31\x01\xd0\xdb\xdf\x93\x5c\xed\x9a\xee\x62\xd2\x0c\xcc\xaa\x83\x24\x4a\x68\xf3\xb3\xe3\xa4\xf1\xc5\x6a\xdf\x32\x57\x56\xa8\x47\x37\x7e\x96\x2d\x22\x41\x6e\xbc\x28\xa9\xf7\x41\x92\x6f\xd5\x6a\x80\xc3\xef\x53\xce\xe1\x76\xcf\x93\x4f\x4a\xee\xa7\x7c\x8b\xb9\xfd\x03\xb2\xb6\x10\x27\x32\xf4\x8d\xdc\xf8\xee\xe3\x64\xa7\x76\x86\x63\xca\xd5\x01\xe4\x76\x9a\xf7\xae\xdc\x4f\x6f\xa5\xb0\xa2\x8e\xf9\x9c\xb8\xb5\xd8\x50\xe5\xd6\xa7\xfb\x6e\x7c\x9b\xde\xf1\xba\x60\x31\xe6\xe5\x79\x28\x8a\x65\x43\xd7\xb7\x6a\xf3\x5d\x15\x14\xe1\xa6\x39\x6b\x4b\xd9\xd2\xa3\xa2\xfe\xde\xff\x83\x9b\x56\xe2\x38\xf5\x18\xa4\xd2\x58\x5b\x9e\x76\x6a\x27\x18\xa4\xfa\x5a\x3d\x85\x12\xfb\x7a\x7e\xfa\xe1\xed\x5c\x7e\xf1\x51\xf1\
x3f\x17\xf3\xd9\x65\xf5\x3f\x67\xc6\xe6\x27\xf5\x14\xe5\x66\xbf\x94\x32\x6e\x3e\x5c\x54\x7f\x5e\x9e\x9e\xce\xff\x46\x12\xf9\x76\xf1\xe1\x72\x79\xd4\xf8\xbf\x4f\xcb\xeb\x0f\xf3\xd6\x5f\xbd\x9b\x9d\xdf\xcc\x49\x2f\x78\x77\x3d\x53\xba\xf5\x86\xd4\xab\xf9\xf5\xdb\xf9\xe5\xf2\xec\x7c\xfe\xe9\xaf\x7f\x25\xc9\xad\x4b\xf8\x51\x27\xf7\xc7\x57\xb6\x72\x5f\x55\x72\xcb\xaf\x38\x9f\xdf\xdc\x7c\x5a\xbe\x9f\x51\x3b\xf0\xe7\xf9\xa7\xd3\xb3\x9b\xe5\xf5\xd9\x9b\x0f\x42\x9e\x99\x31\x43\x3d\xe6\x26\x0d\xf5\x1c\xd7\x07\x22\xa5\x9c\x18\xb3\xa4\x72\x67\xb4\x62\xb3\x5f\x28\xc5\x6e\x3e\x5c\x90\x8a\xc9\x39\x43\x29\x29\x67\x04\xb9\xa0\x9c\x07\xf4\xd2\x72\x22\x52\x8a\x37\xe6\x21\x45\x40\x63\xca\xda\x0a\xf8\xd1\x52\xc0\x8f\xaf\x2c\x05\xbc\x22\xd5\xa0\xbb\x0c\xd0\x06\x7a\x6b\x15\x30\x16\xb2\x49\xe2\xc3\xfe\xcd\x93\x32\x49\xe0\xaf\x99\x26\xae\xb9\xf5\xa7\x0d\x14\xd6\xdf\x6e\xa3\x41\x4b\xb9\x0c\x6c\x73\xaf\x2c\x2b\xe2\xc4\x94\xa8\x90\x92\xa8\x35\xf5\x51\x9e\x60\xfa\xce\x45\xd5\xd1\x00\x77\xe8\x90\x1a\x3e\xd9\xce\x9f\xee\x9e\x3e\xa9\x2a\xde\xd6\x7c\x8a\xb7\xf1\x23\xfa\x28\x5b\x3f\x66\xb3\x84\xc3\x9e\x25\x59\x20\x9a\xa3\x08\x65\x95\x1e\xee\xd2\xfe\x78\x29\xfa\x67\x9f\x04\x71\x52\x86\xf8\x2b\x48\x92\x9a\xf2\xbf\xba\x31\xe1\x5a\x60\x2e\xee\x9b\xaa\x46\x90\x07\x48\x4b\x1b\x9f\x90\x6d\x19\xee\x80\xb8\x65\x0f\xb9\x33\x36\xdb\x15\x97\xe0\xfb\x38\x51\x37\xdb\xfc\x8a\x23\x9b\x1c\xa7\x04\xc9\xf9\xed\xf4\x44\x55\x39\x88\xa4\xde\xf3\xd0\xbc\x26\x61\x8f\xc8\x0c\x76\x7c\x77\x27\xee\x9b\xf7\xc0\xbf\xb0\x95\x0a\x14\xc0\xf3\x06\x51\x97\x45\xfd\xa0\x43\x5f\xc0\x2b\x05\x24\xaf\x35\x79\xae\x2f\x45\xd8\x9d\xaa\xe7\x2c\x2b\xbc\x32\xf6\x05\x01\x21\x03\x60\x88\x0a\x02\x93\xb5\x26\x5e\xf0\x0a\xc6\x5e\xae\x24\xe9\x11\xa4\x71\x43\x6f\xb6\xdb\x87\xc1\x2a\xc8\x42\xcc\xea\x02\xd5\xe8\xba\x2d\xc4\x9f\xc8\x0b\xe3\x09\x14\x56\xd7\x08\xa9\x30\x28\x27\xbd\xb8\x90\x75\xa7\xb1\x8c\xff\x96\xcf\x10\xa4\xe2\x80\x3d\xb2\xa7\x13\x90\x57\xb1\x8e\xd8\xbc\xcd\xcb\x97\xe3\x04\x47\xeb\x52\xfb\x52\x1f\xb7\x8d\x49\x43\xef\xab\x9a\x47\x7b\x6b\x19\
xc2\xd5\xb2\x6c\xb5\x5c\x2b\x56\x68\xba\xe2\x43\xd6\xbe\xa7\xe3\x86\xec\xc0\xdd\x56\xb6\x6a\xee\x03\x61\xa3\xdd\x46\xeb\x74\x89\xde\xb4\x5d\x01\x38\xcf\xda\xfa\x63\xe8\x65\x5b\x7f\x88\xba\x64\x6a\xe4\x8e\xe2\xd9\xf3\x44\x29\x76\x67\x4a\x83\xf0\xfc\xea\xef\xab\x56\x0d\xbe\x35\xe5\x77\xbb\x7e\x36\xaa\xef\x59\x04\xb7\xb3\x42\x55\xa3\xfe\xe5\x4e\xac\x16\xf1\x18\x8e\xdc\x7c\xb2\x18\xee\x92\xe2\xd0\x50\x29\x97\x73\xd2\xa5\xa6\xc5\xc6\x2d\xbf\x35\xbd\x79\xc6\x77\xfb\x38\x61\x61\x65\xfb\x3d\x81\xb9\x0c\xe7\x8d\xdb\xe2\xe3\xa6\xb5\xe7\x48\x2e\x96\xa5\xcc\x54\x41\xd5\x68\xad\xb2\xee\xb3\x75\xea\x26\x74\xa3\x72\xd8\xb1\x6c\xcb\x77\xac\x88\xf7\x27\x77\x0b\xb9\xcd\x6f\x78\xb6\xe5\x09\x6e\x21\xab\xb4\xfc\xf5\x8e\xa9\x33\xc9\xf9\x39\x0a\xa7\xae\xed\xaa\xd5\xa4\xfa\x97\x47\x6b\xd9\xe9\x69\xc6\x76\x7b\x5c\x9b\x2a\xa0\x44\x6a\xc2\xef\x0b\x2b\x6d\x10\xaf\x95\xd2\xbf\xd9\x67\x38\x55\x5d\xa5\xf6\xdf\xb1\xa7\x96\xe1\xb6\x50\xe5\x13\x06\xa9\x46\xed\xdf\x54\xe5\x5b\xe8\xbf\xc5\x96\xde\x50\x80\xa3\x7c\xf5\xf2\xef\xd6\xaa\xfc\x4b\x37\x06\xa9\xc6\x5f\x6d\x59\x64\x10\x27\xa5\xfe\x74\x6d\x14\xb9\x65\xd5\xd6\x44\x31\xac\xa8\xa7\x59\xd3\x73\x15\xfd\xb0\x6e\x9d\xb2\x40\x99\xe8\xd6\x6d\xcd\xca\x3a\xdd\x3a\xa9\x19\xaa\x33\xad\x18\x57\xe2\x48\x54\x52\x42\x64\x33\xb8\x1c\xa1\x3d\xba\xf5\xe9\xd4\xea\x93\x2a\x3b\xab\xa6\x20\x14\x3b\x9d\x9f\x2f\x67\x84\x72\xd7\xb3\x25\xe5\x75\x67\x97\xcb\xf9\xf5\xd5\xe2\x9c\x56\xfa\x72\xfe\xcb\xf2\xd3\xe2\xfc\x74\x3e\x0c\x6e\xeb\x0b\x63\x95\xb3\x79\x29\xa4\x6e\x36\x2f\x85\x55\x20\xab\x62\x78\x05\xab\x2a\x87\xd5\x04\xe7\xa5\xf0\x8a\xe0\x5a\x35\xf1\xea\xd4\x7a\x61\xbc\x36\x57\x95\xa6\x2b\x73\x55\x79\xba\x2e\xb7\x5b\x1e\xa7\x48\xed\x94\x47\x6a\x72\x3b\xe5\x91\x8a\xdc\x62\x38\x53\x35\xb0\x9d\x3a\x7c\x7a\xfb\xde\x18\x6a\x32\x0b\xb3\x5a\x3c\x84\xbb\x11\x25\xec\x6a\xf1\xf4\xb8\xbf\x8b\x53\xd2\x6f\x95\x83\x60\xfe\x0d\x03\x11\x53\xab\xdf\xbe\x64\xfb\xe0\xe5\xc3\x0f\x2f\x55\x91\xd4\xdc\xa0\xae\x7c\x1f\xd7\x3c\xca\xa4\x53\xa5\x0a\x91\x23\xcf\x2c\x85\xfa\xa9\x8c\x01\x6a\x7e\x9a\x16\
x5b\x5e\x19\xec\x33\x8b\x31\xe1\x71\xf6\xc1\xea\x73\xfb\x7a\xf7\x7c\xae\xd5\x57\x9a\xb7\x63\x25\x39\xbf\xdd\x5a\x54\xaa\x39\xd0\x58\xf4\x59\x0c\xab\x3b\x26\xfd\xb0\xea\x5e\xcd\x52\xe6\x94\x4e\xc9\x48\x76\x50\x3d\x6e\x34\x16\x38\x8e\xb0\x25\xe2\x59\x3a\x13\x8f\x1d\xaa\xa7\xe9\xa7\xf7\x3e\x7e\x2c\xe2\x53\x29\x57\x7a\xd5\xdb\x48\x37\xbd\x94\x87\x7c\x95\x75\x6c\x10\xb0\x67\x12\xc3\x4f\xe2\xc3\x66\x8b\xbe\x60\xe5\xe3\xab\x1b\x2b\x48\xcf\x20\xe2\x8c\x48\x8b\xab\x23\x78\xb3\x58\x2e\x17\x17\xd3\x5a\xf8\xe9\xe8\x62\x21\x61\xb9\xb8\x42\xfd\x5e\x7d\x95\x71\x91\xe8\xb0\xab\x46\x16\x7e\x9e\x0d\xe5\xe1\xd0\x3f\xe3\xd9\x39\xf4\x4f\x6b\x82\x5e\xd6\xeb\x4d\x1d\xfe\x62\xf4\xcb\xb4\x4a\x8d\x91\x1b\x2b\x64\x16\xab\x59\x6a\x0f\x76\xe4\x02\x09\xc5\xe4\xbb\xe0\xd9\xf6\x6b\x10\x18\xd7\xf5\xd7\x7f\xb3\xcb\x5e\xa3\x96\x56\x4b\xdf\x6d\xde\xde\x9f\x76\x52\xd6\x6d\xcd\xe4\x87\xed\x7a\x69\x1e\x6c\x3a\x26\x2b\x55\x95\x8a\x63\x2e\x0d\xe8\xf1\xfa\xb0\xc2\xab\x82\x94\x2d\x56\x85\xd5\x97\xc1\xcb\x9b\x1e\xd1\x7b\xac\x99\xa8\x6d\x66\xca\xe2\x02\x5d\xa0\xda\x87\x3a\xcb\xf3\xc5\x7c\xf9\x7e\x71\xda\xe4\xc3\xf3\xbf\x93\x4e\x76\x28\xe9\x45\xc1\xd9\x2f\x95\x90\xb3\xcb\xf2\xcf\xd2\xc9\x2e\xff\xb3\xb8\xac\xdf\x2c\xa7\x5d\xce\xbb\xdf\x46\x29\x8e\xbe\x76\x57\xad\x40\x2a\x86\xd4\x28\x54\x6d\x4b\x29\xa6\xba\xc1\xa8\xa4\x22\x70\x58\xf2\x34\xfb\x1a\x44\xe4\x8d\xe6\xed\x5f\xfd\xd8\x6e\x51\xa9\xa6\x15\x4a\xa6\x95\x64\x79\x13\x37\xbc\x4e\xd8\xbd\xf9\x55\x00\xe0\xb6\x56\xb4\xbe\x44\x7a\x18\x51\xfb\x78\x18\xd1\xf0\xf1\x30\xa2\x87\x11\x3d\x8c\xe8\x61\xc4\xe1\xad\xcf\x53\x89\x9e\x4a\xf4\x54\xa2\xa7\x12\xcd\x1f\x4f\x25\x7a\x2a\xd1\x53\x89\xea\xf1\x54\x62\x47\xae\xa7\x12\xc7\x1e\x4f\x25\x0e\x16\xf3\x54\x22\x4e\x80\xa7\x12\x3d\x95\xe8\xa9\x44\x4f\x25\x7a\x2a\xd1\x53\x89\x9e\x4a\xf4\x54\xa2\xa7\x12\x0d\x04\x78\x2a\x71\x92\x2a\xff\x2b\x50\x89\x3a\x2d\xb8\xc7\x13\x3d\x9e\xe8\xf1\x44\x8f\x27\xa2\xbe\xdb\xe3\x89\x1e\x4f\xf4\x78\xa2\xe9\xe3\xf1\x44\xc3\xc2\x1e\x4f\x6c\x97\xf2\x78\xa2\x55\x79\x8f\x27\x1a\xe1\x89\xed\xa4\x31\xd7\xe2\
x50\xf3\xfc\x99\x63\xe4\x6b\x31\x62\x9c\x5f\x90\x4c\x6b\xd0\xb8\xf5\x5c\x69\x92\xc7\x30\x90\x07\x43\xe3\x8c\xa5\xb2\x31\x1f\x63\x0d\x03\x67\xa4\xbb\xc0\x3a\xc2\xae\x79\x14\xef\x82\x88\x65\xa6\x39\x2e\x5d\xf8\x44\x9f\x56\x2f\xc5\x0a\x98\xa6\xa3\xf1\x15\xea\x98\x0d\x6a\x0d\x59\x1c\x36\x65\xc7\x9b\x1e\x27\x30\x59\x83\x00\x97\x39\x08\xa8\x0e\xd2\x78\x7f\x79\xa0\x8d\x0f\x70\x91\x49\x08\x9e\x75\x9c\xd0\xea\x37\x49\x86\x21\x98\x28\xcb\x10\x20\x32\x0d\x51\x2e\x63\x98\x6c\x43\x40\x1c\xc3\x60\xe7\xe8\x0f\x36\x9a\x4b\x87\x0e\xff\x60\xe6\xf4\x8f\x94\x58\x99\x24\xd2\x41\xc7\x7f\xb4\xd8\x3a\x28\xa0\x77\xfe\x47\x8b\x7c\x06\x58\x00\x0c\x81\x01\xb4\xd0\x02\xa9\xeb\x87\x06\x08\xcd\xa1\x9c\xce\xc7\xc0\x01\xb4\x60\x65\x00\x1d\x82\x07\xd0\x22\x5b\xb0\x81\x06\x20\x40\x8b\xec\x07\x0e\x6a\x6b\x1e\xbe\xf3\x7b\xa1\x03\x92\x76\x40\x3d\x03\xe0\x01\x95\x93\x00\x95\x31\xbf\x0f\x3e\xb0\x94\xec\x02\x40\x00\x6b\x08\x01\x9c\xae\xbc\x56\x30\x02\x7c\xad\x83\x85\x35\x97\x00\xa6\x6c\x02\x7e\x90\xc4\x23\x7c\x02\x6d\xe5\x65\xc3\x8c\x02\x71\x35\x8b\xef\x47\x39\x05\xfc\xce\x59\x38\x70\x0c\xb3\x0a\xc4\x85\x7d\x3d\x01\xaf\x00\x93\x30\x0b\x30\x09\xb7\x00\x66\xec\x02\x7e\xdf\xe8\xda\x3b\x9a\x36\x0c\xe2\x38\xa8\xf1\x0e\x1d\x86\x81\x3a\xb4\xda\xcc\x43\xb3\x09\x28\x1b\x86\x96\x7b\xa8\x8d\x2d\xf2\xc7\xeb\xd9\x07\xc5\x33\xa0\x85\xba\xe7\x1f\x60\x12\x06\x02\x0c\x39\x08\xca\x31\xc4\xb9\x99\x06\x74\xa6\x9a\x0e\x0f\x61\x75\x0e\x3b\xd1\x33\x11\xb4\x6a\x76\x19\x8a\x9a\xdd\x06\xbf\xec\x0d\x72\x14\x05\x1b\x81\x16\xeb\x9e\xa5\x80\x69\x78\x0a\x78\x0e\xa6\x02\x26\xe4\x2a\x60\x42\xb6\x42\x2f\xdb\x21\x5f\x01\x8e\x18\x0b\x20\x58\x1c\xc1\x82\xb5\x00\x0b\xde\x02\xe8\xcc\x05\xd0\xb9\x0b\xa0\xb3\x17\x60\xc5\x5f\x80\x0d\x83\xd1\x2d\x8c\xb5\x98\x69\x24\x60\x8d\x76\xe0\x82\xc7\x00\x17\x4c\x46\xaf\x10\x8c\x0d\xad\x4f\x08\xca\x90\xd7\x27\x04\x65\xcd\x03\x67\x8c\x06\xb8\xe1\x34\xc0\x86\xd5\x00\x32\xaf\x01\xee\x98\x0d\x98\x8e\xdb\x80\xc9\xd8\x0d\x30\xe3\x37\x28\xea\x9a\x51\x86\x03\x7f\xb0\x57\xcc\xc7\x38\xc7\x81\xbf\x86\x94\xdc\x47\x3f\xcb\
x41\xbd\x31\x49\xb5\x73\x0f\xcf\x81\x96\xa9\xe3\x3f\x1a\x4c\x07\xe1\x0a\xd2\xc3\x80\x94\x5c\x07\xbe\x35\x27\xe5\x40\xc0\x84\x05\x21\x5f\x1a\x87\x78\x10\xfc\x09\xbf\xc5\x8f\x68\x98\x10\xc2\xcc\xd2\x30\x24\xe3\x5c\x08\xfa\x3d\x0d\x3c\x43\xcb\x86\xe0\x9b\xa3\xc1\x92\x0c\xf1\x21\x94\x01\xd1\xac\x70\x9b\x11\xa1\xce\xde\xe6\xc4\x6d\x72\x22\x54\x25\x76\x77\x69\x24\x12\x36\xd0\xd1\x50\xda\xb2\x25\x30\x19\x5f\x02\x2e\x18\x13\x70\xc1\x99\x80\x03\xd6\x04\x68\xbc\x09\xd8\xd9\x0f\x6c\xb9\x13\xb0\x64\x4f\xc0\xa9\xf9\xc3\x82\x41\x81\xaf\x65\xfc\xb0\xc4\x51\xc0\x0c\x49\x41\x0f\x8b\xad\x0a\x65\xdd\x8f\xa5\xe0\x97\xbf\x8e\xc5\xa3\x17\x4d\x41\xcb\x56\x28\xcb\x28\x9e\x82\x96\x2b\x71\x96\xe9\x10\x15\x98\x06\x53\x01\x43\x54\x85\x76\x56\x74\x8f\xab\x80\x11\xb2\x42\x33\x31\x49\x3c\xa3\x17\x5b\xf9\x17\x31\x01\x91\x71\x10\x18\x0b\x5f\x35\x23\x61\x40\x90\x87\xb0\x52\xc8\x4b\x2f\xc6\xe2\xc4\x6c\xd5\x45\x59\xa8\x07\xaf\xca\x60\xa3\xc1\x59\x28\x23\xa1\xd7\xbe\x52\xb3\x97\xb8\x3d\x2b\xd9\xba\x3b\xf4\xc6\x9a\xa2\x18\x45\xa6\x40\x61\x60\x32\x1c\x06\xdc\x22\x31\x40\x56\x52\x93\xd0\x18\xb0\xc0\x63\x80\x8e\xc8\x80\x35\x26\x03\xb6\xa8\x0c\x90\x71\x19\x20\x23\x33\x40\xc7\x66\xc0\x02\x9d\x01\x32\x3e\x03\x36\x08\x0d\xd8\x62\x34\x5d\x01\x34\xad\xbc\x1d\x4e\x03\x0e\x90\x9a\x3e\x19\x78\x45\xb8\x1d\x5a\xd3\x23\x83\xa0\x90\xb7\x47\x6c\x34\x75\xc1\x64\x01\x03\x74\x26\x30\x70\x16\x93\x00\x95\xa8\x09\xbe\xad\xac\x60\x30\x55\x66\x30\xb0\xcb\x0e\x16\x1d\x76\xe2\x30\xfd\x9c\x1c\xcd\x65\xf1\x4a\x6c\xf1\x69\xee\xf1\xd8\xea\x74\x4c\x38\x65\x13\x7a\x82\xe6\xdf\x95\xa0\x29\xc7\x88\xe7\x67\x3c\x3f\x33\xfa\x78\x7e\xc6\xf3\x33\x9e\x9f\xf1\xfc\x8c\xe7\x67\x3c\x3f\x83\x3e\x56\x78\x7a\xc6\xd3\x33\x9e\x9e\xf1\xf4\x8c\xa7\x67\x3c\x3d\xe3\xe9\x19\x4f\xcf\x78\x7a\xc6\xd3\x33\x9e\x9e\x31\x2d\xea\xe9\x19\x4f\xcf\x78\x7a\x46\xff\x78\x7a\x66\xe0\xf1\xf4\x8c\xa7\x67\x3c\x3d\x53\x54\xda\xd3\x33\x9e\x9e\xa9\x3f\x9e\x9e\xf1\xf4\x0c\x5e\x88\xa7\x67\x3c\x3d\x83\x31\x7d\x78\x76\xc6\xb3\x33\x9e\x9d\xf1\xec\x8c\x67\x67\x3c\x3b\x03\x9e\x9d\xf1\
xec\xcc\x68\x51\xcf\xce\x10\x4a\x7a\x76\xc6\xa8\xb0\x67\x67\x3c\x3b\x33\x5e\x17\xcf\xce\xfc\xe7\xb2\x33\xfb\x60\xf5\xb9\x7d\xb9\x7d\x3e\x8c\xe6\x4a\xf3\x76\xac\xa4\x69\xee\xf6\x16\x35\x6b\x8e\x3a\x16\x7d\x16\x63\xec\x8e\x49\x97\xc3\x4e\x6a\x21\xd3\x43\x0a\x85\x20\x58\x07\x09\x5f\xd1\x08\x18\xdb\xa9\x7a\x5a\xbc\x9a\x22\xe2\xf9\x7a\xb4\xac\xa6\x4d\xc6\xdd\x17\xef\x95\x72\xe5\x90\xf2\x1c\x86\x51\x5d\x8e\x34\x5f\xa5\x3c\xe4\xab\xac\x93\x51\x1f\xf6\x2c\x15\x7f\x4a\xe2\xc3\x66\x8b\xbe\x86\xe5\x83\xac\xe3\x32\x04\xa7\x67\xd7\x73\xb5\x2f\x7f\xb8\xbc\xb9\x9a\xbf\x3d\x7b\x77\x36\x3f\xc5\xad\x3a\xcb\xc5\xd5\x11\xbc\x59\x2c\x97\x8b\x0b\x8c\x37\x08\x3e\x73\xa3\xb6\xae\x28\x09\xcb\xc5\x15\xea\xf7\xea\xab\x8c\x8b\x44\x87\x5d\x35\xb2\xf0\x93\x2d\x88\x32\xbe\x41\x6d\x6c\xe2\x9e\xca\x32\x59\xf2\xcf\x7f\xa2\xcf\xd2\xcb\x7a\xbd\xa9\xc3\x5f\x8c\xfe\x1d\x8b\x9e\x9a\x23\x57\x2a\x56\xf0\x39\xa7\xdb\x83\x1d\xb9\x4a\x42\x31\xf9\x2e\x78\xb6\xc5\xf1\x52\x6e\xd6\xbd\xeb\xfa\xeb\xbf\xed\xb5\xaf\x51\x55\xab\xf5\xef\x36\x6f\xf4\x4f\x3b\x29\xeb\xb6\x66\x7b\xc5\xf6\xbf\x54\x49\x36\x75\xca\x4a\x9b\xc6\xa3\x2c\x7c\x52\xde\x11\x31\xda\xd0\x9a\x6d\x0b\x83\xfa\xe3\x36\x58\x6d\x4b\xaa\xa9\x86\x01\xec\x59\x82\x97\xd9\x1a\xf1\x39\x15\x44\x35\xac\x75\xd6\xe8\x8b\xf9\xf2\xfd\xe2\xb4\xb1\x40\x17\x7f\x27\xbd\x35\x51\xd2\x8b\x82\xb3\x5f\x2a\x21\x67\x97\xe5\x9f\xa5\xa7\x66\xfe\xe7\xf3\xd9\x72\x7e\xb3\x9c\x76\x4d\xef\x7e\x1b\xa5\x38\x3a\x81\x6c\xd5\x0a\xa4\x62\xc8\xdc\xb8\x55\xdb\x52\x8a\xa9\x6e\x30\x2a\xa9\x00\x37\x96\x3c\xcd\xf0\xd8\xb3\xfd\x29\xfe\x46\xf3\xf6\x6f\xe3\x14\x6f\x51\xb3\xa6\x15\x6e\xbf\x0f\x9f\x80\xe5\xed\x5c\xf7\x23\x02\x76\x8f\xb9\x93\x96\x68\xbd\x58\x23\xc5\xb2\x73\xc8\xcc\x15\x95\x24\x0e\x9e\x4e\x10\xbb\xd9\x11\xe9\xe4\xb0\x01\x35\x5c\x51\xc0\xb8\xfb\xf2\x00\x31\xdc\x20\x80\xb1\xdb\x57\x97\x16\x7e\x0e\xfa\xd7\x84\xfc\x95\x7b\x1e\x4a\xea\x20\xf5\x5b\xd0\x9b\x28\x89\x63\xc4\xaf\xda\xf8\x50\x22\x87\x68\xdf\x36\xbd\x8b\x1b\x22\x1d\xd2\xd7\x15\xa8\xda\xf4\x70\xe9\xa1\x76\x51\x12\x2b\x32\x64\x80\xd8\
xc5\xdd\x45\x2b\xd2\x66\x80\xd6\xc5\x0d\xd2\x2e\x1c\x64\xe9\xeb\x63\x47\xe9\xba\x59\xdd\xac\xe8\xdc\xaf\xb0\xf9\x59\x93\xb9\x46\x54\x2e\x56\xf5\x31\x4c\xe4\xe6\xfe\x6b\xb8\x01\x3c\x44\xe3\x96\x74\x2d\x4a\xe4\x18\x89\x4b\x41\xf1\xc6\x28\xdc\xc2\xae\x8d\x12\xea\x9e\xc0\x75\x4f\xdf\xba\x77\xbc\x30\xa0\x6e\xe9\x8e\x17\x83\x4e\x17\x25\x41\x8b\xde\x0c\xeb\xb4\x6d\x1f\x3d\x8b\x12\xda\x71\xdb\xd0\x92\xb3\xc8\xc3\x45\x77\xfc\x68\xa9\x59\xa4\x5e\x32\x69\x3a\x6e\x38\x22\x66\xdd\xd3\xb2\x26\xa4\xac\xc5\xf6\x3a\xec\xc7\x81\x5d\x45\xfb\x09\xd9\x26\xf1\x8a\x92\xab\xa5\x63\xfb\x68\x57\xdc\xb4\x1a\x71\xe0\x28\x55\x22\x84\xf5\xef\xf5\x04\x94\xeb\x04\x84\xeb\xe4\x74\xeb\x54\x64\xeb\x54\x54\xeb\xa4\x44\xab\x0b\x9a\x15\xaf\xeb\xa2\x51\xac\x54\x82\x95\x48\xaf\x12\xc9\x55\x22\xb5\x4a\x27\x56\xc9\xb4\xaa\x1d\xa9\x6a\x49\xa9\x5a\x13\xaa\xd6\x74\xaa\x35\x99\x6a\x4d\xa5\x5a\x13\xa9\x6e\x68\x54\x07\x24\x2a\x99\x42\xa5\x11\xa8\x8e\xe8\xd3\x89\xc8\xd3\x69\xa8\x53\x03\xe2\x14\x7d\x94\x1d\xa3\x4d\x0b\x7a\x14\x25\x74\x94\x34\xad\x91\xa3\x28\xc1\xbd\x94\x69\xdd\x3b\x00\x25\xb1\x87\x30\xd5\x12\xa3\xc8\xd3\x7c\x45\x97\xf6\xd3\xa2\x48\x15\xad\x22\x4b\x27\x26\x45\x47\x29\x51\x92\x25\x6f\x88\x10\xed\x10\x9f\xc8\xdb\x48\x9b\x0e\xd5\xd3\x9e\xb8\xab\xe8\x18\x19\x2a\x3e\x85\x76\xc1\xd1\x52\xa1\x36\x3c\xe3\x00\x11\xda\x21\x3c\x71\x82\x5b\x34\xa8\x96\xee\xa4\xf7\xd5\x51\x2f\xd9\x89\xab\x65\xc5\xc6\xba\xa5\x3a\xa7\x21\x3a\xad\x69\x4e\x6b\x92\xd3\x96\xe2\x24\x10\x9c\x64\xfc\xd1\x8e\xdc\xb4\xa1\x36\xdd\x28\xc3\x2d\x68\xcd\xaf\xa0\x0a\xb7\x24\x35\x0d\x28\x4d\x74\x58\x8c\x41\x42\xb3\xae\xd3\xc6\x2d\xc6\xc3\x74\x66\x4e\x5b\xe2\x36\xfc\x11\x32\x53\x91\x96\x68\x1d\xf3\x24\x54\xe6\x04\x44\xa6\x09\x8d\x29\x0f\x13\x38\xe5\xad\x73\x12\x73\x9c\xc2\xac\xfa\x0c\xa7\xb8\xeb\x23\x30\xeb\x8a\x7d\xc2\x20\xd5\x18\x01\x9a\x8a\x7d\x0b\x6d\x78\x9b\xbc\xcc\x9b\x04\xf7\xdd\xc3\xd4\x65\x8d\xa2\x44\xfb\x92\xe9\x88\x4b\x6b\x83\xc5\xb0\xda\x9e\x66\x69\x6f\x91\x96\x7a\x4d\x3b\x65\x81\x72\x1e\x93\x72\x8a\x78\x94\x1d\x6c\xd1\x21\x2d\x39\x0d\
x29\xe9\x90\x92\xc4\xab\x3e\x49\x74\x24\x95\x8c\x24\x52\x91\x76\x44\xa4\x15\x0d\x49\x23\x21\x69\x14\x24\x91\x80\xa4\xd2\x8f\x34\xf2\x91\x4c\x3d\x5a\x11\x8f\x76\xb4\xa3\x2d\xe9\x68\x4b\x39\xda\x12\x8e\xb6\x74\xa3\x2d\xd9\xe8\x82\x6a\x24\x12\x8d\x59\xf3\xee\x70\xce\xa2\xcd\x81\x6d\xb8\xc9\x1a\x87\xbc\x32\xb5\xae\x4a\x4b\xfd\x8b\x0d\x04\x35\xef\x1d\x0a\xf5\x2b\xbd\x67\xee\x79\xd6\xf4\x9f\x1f\x5f\xdd\x0f\x51\x90\x2d\x1e\x78\x92\x04\xeb\x67\xf8\xee\x0f\xb5\xb7\x61\x3f\x56\x1c\x0d\x45\x6d\xc5\xe1\x46\x9e\xb5\x73\xbd\x95\x3a\x8c\xc8\x4f\x37\xd4\xf9\x34\x8e\x45\xd2\x67\x2e\x52\xea\x85\x3c\x07\x87\x7c\x8b\x84\x04\x62\xf3\xba\x8a\xfd\xfb\x49\x15\x95\x2e\xa4\x6c\x25\xb1\x82\x48\xb2\x7e\xaa\x72\xb9\xaf\xa3\xf8\x10\x45\xf0\x18\x89\x0d\xd2\x4a\x69\xcb\xd4\x9f\x7f\xbb\x15\xef\xb9\x35\x47\x56\x13\x7e\xff\xe9\xe1\x87\x97\x09\x4f\xb3\x97\x0f\x3f\xbc\x2c\x60\xd1\x13\x75\xe8\x3c\xcd\x9b\x39\x36\x25\x5a\x73\x95\x4e\x04\xb7\x17\x2d\x01\x03\xbe\x05\x19\xff\x92\x0d\x8d\x30\x43\x47\xf4\xf6\x4c\xe2\x5f\x8c\x7e\x6d\xac\x58\x18\x11\xd8\x9a\x7f\x09\x7b\xcc\x27\x83\x38\xb3\xef\x58\xf2\x79\x1d\x3f\x46\xb0\x0e\xd2\x7d\xc8\x94\x39\x80\x7f\xc9\x0e\x6c\xd8\xf7\x56\x8c\xe4\x91\x48\x49\xea\x43\x56\x71\x74\x1f\x06\xab\x6c\x50\x25\x75\x0c\x5f\x9e\xde\x6e\x59\x32\xf4\x19\xc7\x90\x16\x0a\x94\xc1\x5f\xdd\x85\x2c\xfa\x3c\xf8\x8b\x30\xde\xa4\x57\x2c\xe2\x43\x5f\x68\xea\xb9\x9e\xb7\xc3\xd8\x3a\x84\x58\x83\xda\xfe\x9a\xea\x05\x23\x85\x3a\xcb\x8e\xe8\xc3\xa2\x72\xb9\x42\x21\xef\xe1\xb1\x03\x74\x4e\xea\x4d\xf6\x41\xef\x4c\xd6\x11\xda\x24\x50\xa2\x0d\xd4\x62\x1a\x1a\xb6\xdd\x66\x41\x9a\xb7\x44\x86\x72\xeb\x29\x5c\x79\xde\x2d\xae\x2f\x66\xcb\x16\x09\x35\xbb\xfe\xf9\x74\xf1\xf1\xf2\x08\xae\x67\x1f\xc7\x1c\x38\x4c\x6e\x2e\xc7\x9a\xd7\x8c\x16\x29\x6a\x31\xfa\xc3\xeb\xd9\xc7\xfe\x95\x31\xc8\xc2\xc1\xcd\xd7\x70\x7c\x74\x0e\x19\xd9\x60\x1b\x37\xfa\x6d\x21\xff\xcb\x42\xb5\x3d\xc9\x1a\x15\x5a\x84\x47\x39\x26\xfa\xc7\x79\xbe\xda\xb8\x5f\xdb\x7f\x19\x5d\xc6\xf0\x23\x7b\x5c\x66\x6b\x85\x5f\x89\x9f\xb7\xcc\x81\xa3\x09\x00\x51\
xab\xb5\xd9\x4a\x9c\x0d\xef\x4b\xae\x96\x6a\x93\x5c\x9e\xc7\xf2\xfb\x6f\x86\x4d\xe5\xc6\x6b\xbe\x68\x5f\x35\xfa\x46\x0d\x2d\x08\x56\xad\xbd\xf0\xd7\xde\xe2\x7c\xb5\xcc\xc7\x14\xe2\x1d\x8d\x31\x76\xaa\xb6\x12\x88\x55\x51\xa9\x3d\x16\xf3\x4e\xb6\xcc\xd8\x09\x1e\x03\x85\xed\xe2\xe7\x38\xe3\x5f\xc4\x46\xe7\x65\x27\xcd\x2c\xde\x65\x68\xb8\xe9\x92\x0b\x6a\x66\x8b\x46\xd1\x04\x48\xb8\x58\x9c\xce\xf1\xb1\x11\xde\x2e\xce\x17\xd7\x47\xf0\xcb\xa7\xeb\xd9\xaf\x47\x70\xb3\x9c\x2d\x6f\x4c\x7c\x0b\x4d\x15\x6b\xc7\x9d\x6a\x19\x15\x92\xb5\x32\xfa\xa5\xac\xb8\xd1\x2f\xe5\xb7\x0d\xfe\xb2\x58\x23\xcc\x26\xb5\x89\x53\x54\x3b\xc8\xc8\xf8\x22\x04\xc3\x51\x80\xe4\x35\xb2\x3c\xc9\x29\x64\x22\x48\xcd\x26\x1e\xc9\x34\x4d\x31\x47\x1b\x9a\xa0\x51\x1c\x2f\x79\xf2\x19\xb6\x39\x26\x27\xf4\x71\x5b\x0d\x33\x5a\x02\x07\xc2\x86\x7c\xc3\xa3\xf5\x92\xef\xf6\x21\xcb\x8c\xd6\x3f\x82\x41\xbc\x35\x34\xcf\x1b\xef\x34\x92\xd0\x5c\x9f\x66\xd2\x54\x2b\x0a\x17\xd7\x4b\xb1\x2b\x44\x6c\x27\xfe\x78\x5b\xa9\x8f\x4c\x2d\x0c\x1d\x5a\x49\x0e\xfd\xdc\x95\x2a\x48\x21\xdd\xc6\x87\x70\x2d\x0d\x3c\x86\x12\xf3\x6a\x49\x13\xa5\x24\x69\xf7\x71\x98\xdb\x04\xf3\x73\xa3\xb4\x09\xdd\xfe\x7f\xff\x08\xd9\x1d\x0f\x3f\x89\x86\xf9\xe7\xad\xa9\xa1\xb5\x16\xa8\x21\xe1\x69\x1c\x3e\xf0\x82\x17\x95\xd2\x5e\xbc\x48\xd5\x4a\x7d\x02\x26\xeb\xeb\x2e\x88\x66\x14\x10\xda\x76\x18\x5c\x74\xde\x8b\x1f\x0a\xcd\x83\x79\x18\x3f\xf2\x04\xee\xe2\x83\x32\x89\x22\xcc\x78\xb9\x65\x5a\xcc\x4b\x1e\xad\x9e\xf2\x53\x46\x90\x96\x23\xe1\x48\xfa\x8f\x71\x51\x57\x63\xdf\xa9\xbb\xa7\xdc\x2c\x55\x7a\x3e\xee\x82\x28\xd8\x1d\x76\xb5\x70\xbd\xca\x8e\x65\xce\x04\x1e\x52\xae\xec\xea\xf5\x03\xb7\xd2\x73\xbe\x8b\x13\xe0\x5f\x98\xa8\xe6\x11\x04\x18\xfb\x6d\x91\x65\x7c\x7f\xb8\x0b\x83\x74\xcb\x45\xeb\xad\x38\xf0\x07\x21\xf6\xfb\x57\xa2\xda\x87\x8c\x4b\xfc\xdb\x54\xe4\xed\x2e\x88\x3e\x69\x90\xf4\x6a\x26\x15\x50\xf2\xf7\xa6\xea\xf4\xbc\x16\x32\xc7\xc4\xa3\x14\x13\xc5\x19\xec\xd8\x67\xd1\x0e\x51\xca\x2b\x1d\x2f\x8b\x4c\xbb\x48\xd6\x50\x7d\x3f\xcb\xa4\x0f\xa5\x7a\x4b\x89\xbf\x1b\
x79\x8a\xed\xc3\x38\x5b\x8a\xd9\xf0\x2c\xf3\xe6\x2a\x7f\x1b\xae\x2c\x75\x2f\x2b\xde\x66\xec\x01\xa4\x53\x78\x94\x13\xa9\xec\x7e\xd1\x64\xe6\x33\x29\x07\x17\xd5\xd1\xa3\x7b\x1c\xbd\x3a\x5f\x2c\x3f\x2d\x7f\xbd\x22\x9c\x49\x01\xce\xcf\x2e\xe7\xf2\x34\xfa\xf6\xe7\xf9\xe9\xa7\xd9\xf5\x7c\x56\xfd\xdf\x9b\xd9\xf5\x11\xbc\x9f\xcf\x96\x17\xb3\x2b\x33\x0c\xc6\xdc\x0a\x7c\xac\xaf\xb6\x61\x59\x51\x6b\xc3\x9f\xd6\x3f\x0d\x59\xe4\xcd\xcc\xcc\x60\x7b\x5c\x34\x92\xc9\xfd\xa9\x79\x9e\xc1\xcc\x19\x44\x1c\x96\x61\x93\xd2\xf3\x4c\x1d\xca\x4b\x7b\xce\xe3\xb9\xab\xb1\xd8\x96\xe4\x62\x2f\x37\x14\x73\x57\x92\x3a\x8e\x7a\x9f\xc4\x3b\x39\x9b\x6e\x32\xb6\xfa\xbc\x4e\x82\x07\x9e\xe4\x91\x36\x53\x98\x5d\x9d\x19\x86\xc6\x44\x87\x5b\xc9\x88\x81\x34\x49\x41\x78\x06\x06\x00\x32\x50\xaa\xeb\x51\x40\xae\x48\x63\x64\xa8\xb2\xb0\x67\x09\xdb\xf1\x8c\x27\x69\xd3\xc0\x89\x30\x0e\x63\xfd\xa1\xcd\xaf\x2e\xc5\x73\x9c\xc7\xa9\x33\x2e\x40\x09\xe5\x03\x75\xaa\x84\xe2\x92\x8c\x1c\x5e\xa0\x09\xe6\x43\x4c\xa0\x36\xf5\x08\xa3\xd6\xab\x31\xe0\xde\x3c\xc1\x9a\xdf\xb3\x43\x98\x1d\xe5\x71\x9a\x1e\x29\x79\xa1\xea\x0b\x51\x03\x66\xfe\x20\xe3\x83\x16\x0e\xf1\x48\xa9\xb5\xd0\x18\x3b\x71\x83\xdb\xb7\x72\xaf\xe0\xf3\x7f\x54\x29\xc7\x1e\x02\xfe\x58\x5e\xda\xc6\x34\xdb\xdd\x87\x36\x96\xc1\x2e\x38\x95\x7a\xdc\x24\x4a\xb2\x0d\x54\xa5\x1e\x83\x70\x55\x68\x99\x15\x49\x93\x0e\x06\xad\x22\x08\xae\x87\xb9\xd2\x84\xae\x22\x48\x7c\x9e\x60\x57\xc5\xbb\x06\x43\x5e\x11\x24\x96\x41\xb2\x86\x03\x5f\x11\x24\x17\xa1\x93\xc6\xc2\x5f\x11\x44\x2b\x86\x6f\x28\x08\x16\x41\x68\x2b\x6c\x96\x26\x14\x16\x41\x68\x7f\xf0\x2c\xb9\x26\x12\x24\x3a\x0d\xa1\xa5\x9e\xf1\x40\x5a\x94\xa1\x9a\x4e\x11\x4e\x4b\x27\xbb\x1b\x54\x8b\x22\x92\x96\x72\xcf\x2e\x18\x97\x7a\x5c\xae\xe6\x56\x81\xb9\x5a\x12\xa7\x3f\xb4\x58\x87\xe5\x52\x8f\x51\x70\x2e\xca\x5e\x11\x8f\x84\xe8\xa2\xae\xe0\xac\x3f\x50\x17\x69\x4b\xab\x42\x7b\x69\xc2\x75\x11\x25\xb6\x03\x7c\x35\x83\x76\x11\x84\xf6\x87\xf9\xca\xb5\x3d\xb4\xbd\x77\x20\xd8\x17\x29\xdf\x1a\x38\x0e\xf9\xa5\x9e\x69\x32\xae\x8d\x86\xff\
x22\xf6\xbd\x2e\x83\x59\xc1\x00\x51\x44\x6a\xc3\x86\x55\xa1\xc0\x28\x87\x8b\xc1\xe0\x61\xcd\x86\xa0\x8c\xd5\x9e\x10\x62\xb5\x91\x46\x39\xd4\x0e\x06\x12\x53\xc1\xc1\x08\x62\xdd\x87\x13\x53\x8f\xfb\xa0\x62\xea\x71\x0e\x3c\xe5\xd5\x9d\x00\x7b\x52\x4f\x87\x44\x6a\x86\x19\xa3\x75\x59\x23\x30\x99\x3e\xd8\x18\xb5\xaa\xdd\xf0\x64\x35\x1a\x8a\x32\xd9\x06\x83\x94\x15\xba\x7a\x82\x60\xf7\xa1\xca\x1a\x72\x5d\x06\x2c\x6b\x08\xee\x0d\x5b\x66\x29\x32\x0f\x74\xa6\x0f\x5e\x46\x97\xdd\x0c\x77\xa6\x0f\x61\xe6\x44\xfa\x8f\xaf\x74\xd2\x5f\xd9\x48\xd7\x84\x3e\x73\x12\xce\x4c\x3d\x94\xdc\x87\xf4\xd0\x66\xad\xd2\x84\x9c\x7c\xe4\x30\x67\xed\xc2\xe8\x44\x82\xe4\x90\x67\xed\xc2\x94\xbc\x7e\x16\xe1\xcf\x74\xc5\x29\x39\xf9\xac\x43\xa1\xb5\x84\xd8\xe4\x07\x74\x10\x16\x6d\x40\x0c\x36\x3b\x9f\x83\x10\x69\xfd\x62\xd0\xb9\x02\x5d\x85\x4b\x6b\xc9\xb2\x4d\x3d\x48\x0e\x9d\xa6\x1e\x5a\x00\x35\xf5\x38\x0a\xa3\xa6\x9e\xd1\x60\x6a\x84\x95\xbe\x08\xbf\x36\x14\x52\xcd\xf2\x3c\x78\xa2\x0f\xac\x46\xba\x70\xb5\x42\xb1\x59\xc5\x35\x52\x4f\x5f\x40\xb6\x32\xc8\x1a\xe5\xcc\xe6\x38\x2c\x9b\x7a\xc6\x82\xb3\x51\x15\xf2\x43\x21\xda\x68\xaa\x86\x7a\x50\xb7\xbe\x40\x6d\x76\x57\x59\xa5\x10\xe9\x84\x6b\xa3\x54\xd7\x7d\x80\x37\xf5\x8c\x86\x79\xb3\xb8\xc4\x0e\x05\x7b\xa3\x8c\xd7\x56\x78\xb8\x76\xc8\x37\x4a\x4d\x7b\x82\xc4\x95\x23\x97\xa2\xcf\x1f\x08\x15\x47\x8c\xc0\xa6\x1e\xd7\x01\xe3\xd4\x33\x1e\x36\xce\xc2\x36\xb7\xee\x0b\x1e\x47\x90\xd9\x5c\x00\x34\x21\xe4\x88\x9a\xac\x76\xec\xcb\x56\x20\x39\x92\xe6\xa9\x3f\xf4\x1c\x9c\xe1\x72\x2b\xa8\x67\x82\x00\x74\xea\xb1\x0e\x43\xa7\x1e\xeb\x60\x74\x75\x31\xf4\x90\x74\xea\x21\x04\xa6\x53\x8f\x95\x95\xc5\x36\x48\x9d\x7a\x6c\x42\xd5\xa9\xc7\xa5\xa9\xc8\x22\x6c\x5d\x4b\xde\xf4\x86\x22\xcb\xa0\x75\xea\x31\x08\x5d\x47\x59\xb9\x54\x16\xdc\xbe\x00\x76\x36\x2b\x4c\x27\x19\x4c\x11\xc6\x8e\x6a\x8d\xaf\x07\xbe\xeb\x06\xb3\x23\x49\xed\x86\xbf\x6b\x84\xb4\xa3\x9c\x86\x1d\x06\xc1\x53\xcf\x04\xa1\xf0\xd4\x33\x16\x10\x8f\x62\xde\x2f\x02\x13\xf7\x84\xc5\x23\x88\x6c\x04\xd2\xeb\x06\xc7\xa3\x74\x51\x6f\
x38\xbd\x9a\xd1\x8c\xa2\xbd\xee\x0d\xaa\x47\x0a\xee\xaa\x1e\x97\xa1\xf5\xd4\x33\x1a\x60\x8f\x78\x58\x19\x09\xb3\x47\x90\x5a\x05\xe6\xd3\x06\xdb\x73\x68\x1e\xec\x86\xdc\x23\xdf\x2d\x6a\x26\x31\x4d\xe0\x3d\xda\xfc\xef\xb5\x5f\xd5\xac\x51\x34\x9f\x21\x83\x10\xf0\xb4\xb3\xdb\x50\x82\x1c\xb8\x23\x35\xef\x04\xc1\xfb\xd4\x33\x4d\x08\x3f\xf5\x38\x0c\xe4\xa7\x1e\xaa\xd2\x9f\x14\xd4\xaf\x59\x18\x1b\xda\xaf\x59\x1a\x19\xe0\xaf\x59\x98\x16\xe6\xaf\x29\x83\x14\xec\xaf\x29\x82\x66\xb6\xa0\x04\xfe\x6b\x95\xa5\x19\x5b\x68\x41\x00\x9b\xa5\x69\xf6\x12\x62\x40\xc0\x66\x71\x2b\x73\x87\x4d\x70\xc0\xa6\x0c\x5b\x63\x87\x5d\xa0\xc0\x7e\x29\x14\xe3\x82\x5d\xd0\xc0\x5e\x29\x24\x43\x87\x7d\x00\x41\x6d\x7d\x4c\xc3\x08\x56\xcf\x3d\x92\xfb\x01\x27\xf1\xe2\xf3\x50\x50\x38\x04\x05\x86\x43\x23\xfc\x56\xb9\x6b\x23\x9b\x51\xb5\x81\x79\x6c\xba\x87\x1f\x5e\xaa\x22\x86\xc1\xe7\xaa\x47\xf9\xb0\xaf\x79\x94\x29\x17\x79\xc9\x1f\xcb\x93\x5d\x91\x1f\xa3\x50\xa0\xa1\xf5\xa6\x62\x23\x2f\xa2\xe4\x89\x73\x81\x44\xd2\x30\x5b\xec\x3e\x58\x7d\x6e\x5f\xe4\xbf\x0e\xb4\x73\xa5\xa9\x09\x4d\xde\x44\xfa\x0d\xeb\x0a\x36\x87\x31\x8b\x3e\x8b\x9b\xe8\x1d\x93\xfe\xaa\x64\x6d\xac\x1a\x93\xcf\x85\xc2\xac\x83\x84\xaf\xf0\x60\x17\x38\xd6\x85\x9d\x16\xd5\xf8\xe6\x94\x60\xba\x41\x52\xd6\xd6\x81\xb7\xf4\x7b\xa5\xbe\x3a\xa4\x3c\x87\xbf\xe4\x28\xa2\x5d\xb2\x52\x1e\xf2\x55\xd6\x49\xd6\x04\x7b\x96\xd2\x2c\x83\x49\x7c\xd8\x6c\x55\x4c\x0d\x35\x2a\x5d\x38\x9d\x9d\x9e\x5d\xcf\xd5\x69\xa4\x11\x57\x6f\xb9\xb8\x3a\x82\x37\x8b\xe5\x72\x71\xf1\x7c\x7e\x44\xda\xba\x10\xe4\x2c\x17\x26\x7c\x76\xbb\x94\xfa\x5a\x64\xc1\xe8\xb0\xab\xc6\x23\x75\xd6\x06\x51\xc6\x37\x84\xbb\x7c\x1e\x56\x52\x94\xff\xf3\x9f\x6c\x27\xfd\x65\xfd\x4b\xec\xa6\x91\x98\x45\x3b\x16\x3d\xd9\xda\xc2\xa5\xc2\x4b\xcd\x48\x31\x69\x74\x53\x00\x29\x36\x9f\xd0\x17\x3c\xdb\x7e\x6d\xd2\xf0\xba\x5e\x95\x7f\x89\x85\xb6\x51\x63\x07\x8b\xed\x6d\xde\x1b\x9f\x76\x52\x22\x3a\x81\xb2\x7a\xda\x26\xfd\xa6\x99\x40\x29\x37\x79\x44\x73\x45\x90\xd9\x10\x63\x99\xb0\xad\xc2\x4a\xaa\xa0\x44\x04\x89\x4d\x82\
x66\xcf\x72\xb4\xd0\x7a\xa2\xe4\x58\x5e\x5d\xed\xe8\x60\x73\xb8\x98\x2f\xdf\x2f\x4e\x5b\x11\x57\xd5\xdf\x49\xc7\x64\xba\x48\xe9\x8c\x5c\xfc\xf9\xec\xb2\xfc\xf3\xcd\x87\x0b\x0b\xa9\xe7\xb3\xe5\xfc\x66\xf9\x7c\x1b\x56\xb7\x7d\xe8\x42\x88\x2a\x99\xaa\x3d\x2d\x0a\x93\xb4\x50\x55\x8f\xd1\x0b\xab\x0e\x43\x94\x4f\x35\x39\xbc\xbe\xce\x85\x4a\x97\x4d\xec\x9b\xba\x50\x59\x57\xb0\x69\x21\xde\xef\xc3\x27\x60\x79\x07\xd4\x7d\xe8\xb0\x77\xea\xfb\x8c\x27\x70\x5b\x13\x70\x5b\xf3\xcc\xf2\x41\x07\x7c\xd0\x01\xbc\x44\x1f\x74\x40\xff\xf8\xa0\x03\x3e\xe8\x80\x0f\x3a\x60\xf0\xfc\x87\x05\x1d\xd0\x1d\x0d\x7c\xf4\x01\x1f\x7d\xc0\x47\x1f\xf0\xd1\x07\xf0\x22\x7d\xf4\x81\xea\xf1\xd1\x07\x54\x75\x7d\xf4\x01\x1f\x7d\xa0\x21\xd7\x47\x1f\xf0\xd1\x07\x86\x1f\x1f\x7d\x80\x50\xd8\x47\x1f\xf0\xd1\x07\x46\xc5\xf8\xe8\x03\x8d\xc7\x47\x1f\xe8\x7f\x7c\xf4\x01\x1f\x7d\xc0\x47\x1f\xf0\xd1\x07\x7c\xf4\x01\x1f\x7d\xc0\x47\x1f\xf0\xd1\x07\x06\x1f\x1f\x7d\xe0\x3f\x26\xfa\x80\xce\x62\xe4\xc3\x10\xf8\x30\x04\x3e\x0c\x81\x0f\x43\xe0\xc3\x10\xe0\x1e\x1f\x86\x00\x7c\x18\x82\xf2\xf1\x61\x08\xe4\xe3\xc3\x10\x50\x65\xf8\x30\x04\x94\xd2\x3e\x0c\x81\x0f\x43\x30\x2a\xe5\x5f\x37\x0c\x41\x3b\x09\xe9\xb5\x38\xfe\x7d\xdd\x4c\xa4\xb2\x0a\x78\x61\x13\x5d\x68\x71\xb5\x69\xdc\x52\xaf\x34\xc9\x48\x19\xc8\x03\x36\xdc\xf1\xec\x91\xa3\x34\x64\xd9\x63\xdc\xcc\xe1\xa8\xa2\x20\x98\xef\xa0\x34\x08\x62\xcd\xa3\x78\x17\x44\x2c\x8b\xbf\x52\x40\x82\xd3\xaa\x02\x34\x31\x53\x0e\x0c\x6a\xe5\x3a\x26\xb9\x5a\x33\x17\x87\x72\x39\x50\x70\x07\x24\x7c\x2e\x5a\xa0\xe4\xa3\x05\x3b\xa4\x86\x4a\x64\x81\xcd\xa8\x02\x77\xf9\x69\xe1\x2b\x8c\x2e\x9b\xba\x4e\x91\xb7\x16\x4c\x72\xd7\x52\x34\x28\x52\xf3\x3d\x92\xbf\x96\xa8\x99\x71\x96\xc3\x16\xac\xc6\x3f\xb8\xc0\xca\xc0\x5e\x3b\xee\x1c\x2f\x83\xc9\x10\x33\x18\xc3\xcc\xa8\x32\x73\x38\xad\x07\x35\x23\x4a\x2d\xf5\xc8\xcf\x89\x9b\xc1\x24\xc8\x19\x8c\x60\x67\x44\x91\x1a\x74\xcb\xda\x5b\x1f\x94\xae\xa8\x09\xac\xd5\xf0\x33\xa2\xc8\x1e\x68\x4d\x22\x68\xee\xbe\xde\x0a\x5c\x83\x49\xe0\x35\x18\x03\xd8\xa8\x83\x34\x1d\x80\
xd8\x88\x32\x8b\x66\xd4\x81\x6c\x96\x22\xfb\x1b\x96\xbe\x3a\x44\xae\xac\xbc\x6e\xa0\x36\x98\x60\x2f\x71\x00\xb7\xc1\xd7\x3d\x66\x39\xe2\xdc\x60\x42\xd6\x0d\x26\xe2\xdd\x60\x1a\xe6\x0d\x46\xb8\x37\x72\x4d\xfb\xd9\x37\xea\xba\x97\x13\x73\x1a\xfe\xcd\x6a\x39\x29\xa8\xb9\x06\x03\x47\x6d\x49\x1d\x39\x57\x70\x70\x44\x99\xbd\xf4\x1c\xcd\x9f\x11\x72\x23\xb3\x73\x82\x0e\x86\x29\x3a\x6a\x55\xa3\x75\x9f\x85\x8c\xda\x9e\x5a\x9b\x60\x49\xd3\x11\xa5\x96\x0c\x9e\x86\xa8\xa3\x7e\x7a\x1f\x87\x47\xb3\x33\x8a\x67\x80\xc5\x73\x32\xe2\xdb\x3c\x1e\x51\x68\x45\xf1\x4d\xc6\xe4\xc1\x28\x97\x47\xbe\xbe\xb4\x8c\xa0\x4e\xd9\x3c\x98\x94\xcf\x83\x61\x46\x8f\x28\x31\x8a\xb3\xe9\x38\x3d\x98\x8a\xd5\x83\x51\x5e\x8f\x28\x34\xa7\xfc\xfa\x99\x3d\xa2\xdc\x1a\xe9\xd7\xc7\xed\xd9\x49\xce\x69\xbf\x26\xbb\x67\x27\xb2\x46\xfc\xe9\xf8\x3d\x3b\xe1\x4d\xea\x4f\xcf\xf0\xd9\xbd\xa1\x49\xfe\xe9\x39\x3e\x67\x6f\x78\xf5\x63\x97\x67\xac\x58\x3e\xcb\x41\x63\x4f\x00\x02\xd9\x0f\x00\xac\x49\x40\xb0\xa6\x01\xc1\x96\x08\x04\x5b\x2a\x10\x6c\xc9\x40\x70\x40\x07\x82\x3d\x21\xd8\x15\x41\xb3\x51\x6b\xe4\xd0\x4c\xe6\xe0\x8e\x16\x04\x77\xc4\x60\xaf\x28\xbc\xd5\xba\x4f\x14\xc1\x8c\xde\x27\x8a\x60\x4b\x07\xc7\x04\x21\xb8\xa4\x08\xc1\x9e\x24\x04\x4b\x9a\x10\x5c\x13\x85\x30\x4a\x15\xd2\xb5\x19\x13\x91\x85\x30\x19\x5d\x08\x63\x84\x21\xf5\x44\x98\xf0\x5e\xca\x90\xaa\x79\xce\xd9\xc4\x5e\xd2\xd0\x56\x6f\x70\xa2\xa5\x0d\xed\xef\x7b\x1a\xe2\xd0\xe6\x48\x5c\x39\x6a\xb7\xa8\x43\x1b\x3d\x79\x97\x55\xa4\x1a\x56\xa1\xba\xdb\x39\xe3\x15\x61\x4a\x66\x11\x46\xb8\x45\x17\x17\xfe\x2e\xbb\x48\x5f\x63\xd4\xa7\xf6\xf3\x8b\x54\xc9\x2a\x8d\x48\x1f\xc3\x48\x55\xf7\x14\xcc\x6e\x1f\xc7\x68\x71\x57\xae\x9c\x8a\x75\x2c\xa3\xc5\xc2\x55\x63\xf6\x06\x78\x46\xfa\x50\x6b\x56\xbe\xc6\x34\x3a\x58\x6b\xf4\x5c\xa3\xc5\x98\x28\xe3\x1e\xba\x00\xcf\xa1\xe6\x42\x53\xec\x0a\x2d\xbe\xd1\xc9\x74\x73\xaf\x49\xaa\x93\x91\xd6\x41\x20\x81\x1e\x08\x52\x3d\x8e\x08\x49\x70\x46\x49\x82\x0d\x29\x09\x2e\x6c\x78\x6e\x88\x49\x70\x42\x4d\xc2\x04\x46\x49\x6b\x7a\x12\xbe\xae\x49\xd2\x09\
x48\x09\x93\xc1\x94\x30\x02\x54\xd2\x55\xe7\xbd\x50\x25\x75\xa6\xe4\x28\xa6\x1e\xac\x74\xe0\x82\x71\xd4\x82\x2b\xc9\x96\x9e\x36\x92\x69\xc5\xa6\x82\xd2\x33\x6b\x58\x1f\x69\x45\xa6\xaa\xc4\xdd\xa3\x99\x30\x0d\x9e\x09\xc3\x88\x26\x51\xa2\x0c\x64\xd9\x87\x69\x52\x4f\xea\xf7\x45\x50\xef\x1e\x54\xd3\xde\xc0\xdb\xc1\x35\xff\x33\x0c\xbc\x8e\xc3\xa4\x42\xfb\xdc\x54\x3a\x51\xd6\xf0\x4d\x0b\x5f\x89\xb6\x31\xb6\x63\xa0\xb6\x1b\x60\xbd\x26\x55\x6a\x9f\x39\x86\x3f\x61\x04\x00\x25\xaf\x2d\xff\x2e\x16\xd3\x26\x0c\x4a\x6f\xe1\x86\x9b\x9b\x1e\x08\x75\x55\xe5\xa9\x8c\xa5\xcf\x6e\x46\xb2\x40\x4a\xc1\x1a\x2b\x05\x5b\xb4\x14\x1c\xe1\xa5\xe0\x06\x31\x05\x4b\xcc\x14\x2c\x51\x53\xb0\xc5\x4d\xc1\x1a\x39\x05\x4b\xec\x14\xec\xd1\x53\x70\x83\x9f\x76\xc5\xd8\xd8\xd3\x5c\x60\xa8\xe0\x0c\x45\xed\x93\x44\x35\x5b\xb9\x40\x52\x7b\x24\x91\x4d\x69\xae\xd0\x54\x4d\xbd\xf0\x59\xb2\x81\x98\x29\x1b\x1c\xc7\x5e\x22\xa4\x1a\x86\xa9\xb2\x66\xc3\xb3\x66\xce\x06\x93\xec\xd9\x04\x99\x55\xbe\x6d\xfb\x0c\xda\xd1\x61\x27\xae\x36\x5f\x8b\x52\xbd\x2c\x5e\x4f\x13\x32\xa5\x2e\x89\x56\xb5\x8e\x79\xb7\x6c\x60\xcf\xa7\x0e\x3c\xff\x51\x7c\x6a\x39\xb2\x3c\x9d\xea\xe9\xd4\xf6\xe3\xe9\x54\x4f\xa7\x36\x1e\x4f\xa7\x7a\x3a\xd5\xd3\xa9\x9e\x4e\xa5\xd4\xea\x3f\x98\x4e\xd5\x1d\xb2\x3c\x9b\xea\xd9\x54\xcf\xa6\xfe\xa7\x99\x2e\x3d\x9b\xea\xd9\x54\xcf\xa6\x56\xd5\xf6\x6c\xaa\xa6\xca\x9e\x4d\xf5\x6c\x6a\x53\xb8\x67\x53\x8d\x5f\xe3\xd9\x54\xcf\xa6\x76\x64\x78\x36\x75\x50\x94\x67\x53\xcd\x45\x79\x36\x55\xfb\x78\x36\xd5\xb3\xa9\x9e\x4d\xf5\x6c\xaa\x67\x53\x3d\x9b\xaa\x1e\xcf\xa6\xd6\x1e\xcf\xa6\x7a\x36\xd5\xb3\xa9\x9e\x4d\x45\xc8\x7c\x6e\x83\xa4\x27\x53\x3d\x99\xea\xc9\x54\x4f\xa6\x7a\x32\x95\x58\x55\x4f\xa6\x16\x8f\x27\x53\xff\x3d\xec\xa5\x9e\x4c\xf5\x64\xaa\x27\x53\xdb\xe5\x3d\x99\xea\xc9\xd4\x7e\x49\x9e\x4c\x35\x93\xe4\xc9\x54\x4f\xa6\x96\xcf\xb7\x4e\xa6\xee\x83\xd5\xe7\xb6\x22\xe5\xeb\x40\xaa\x57\x9a\x9a\xd0\xe4\x4d\xa9\x5f\xb2\xae\x65\x73\x5c\xb3\xe8\xb3\xb8\xaa\xdf\x31\xe9\x86\x4e\x3e\xa0\xab\x01\x8a\x3b\xca\xd1\x29\xbc\x75\x90\xf0\
x95\x0d\x81\xea\x66\x61\x39\x2d\xaa\x41\x17\xf4\xdc\x23\xa5\xac\x32\x49\x05\xd9\xa4\x21\xde\x2b\x95\xe1\x21\xe5\x39\x8c\x2a\x87\x12\x65\x89\x8a\x21\xe5\x21\x5f\x65\x5d\xbb\xf1\x9e\xa5\x94\xbb\x62\xb6\x4d\xe2\xc3\x66\x2b\xeb\x95\x0f\xcd\xd2\x21\xb3\x70\xad\x24\x88\x3d\x3d\xbb\x9e\xab\xb3\xcc\x87\xcb\x9b\xab\xf9\xdb\xb3\x77\x67\xf3\xd3\x23\x58\x2e\xae\x8e\xe0\xcd\x62\xb9\x5c\x5c\xe0\x3d\xe2\x68\x17\x99\x63\x7d\x5d\x08\x72\x96\x8b\x2b\x42\x29\xf5\xb5\xc8\x82\xd1\x61\x57\x8d\x47\xea\xd4\x0d\xa2\x8c\x6f\x08\xdb\xf9\x7d\x9c\xec\x58\x26\xcb\xff\xf9\x4f\xb6\x33\xff\xb2\xfe\x25\x76\xd3\x48\xcc\xa2\x1d\x8b\x9e\x2c\xdd\x4e\xb2\x18\x58\x18\xaa\x19\x29\x26\x8d\x6e\x0a\x20\xc5\xe6\x13\xfa\x82\x67\x5b\x0a\xe5\xec\x72\xa5\xbd\xae\x57\xe5\x5f\x67\xb5\x6d\x54\xdb\xc1\x8a\x7b\x9b\x77\xc9\xa7\x9d\x94\x78\x4b\xb3\xbf\xb6\x7d\x3d\x9a\xb6\x19\xa5\xbd\xe5\x11\xcd\xeb\x41\x7a\x81\xc5\xd2\x45\xa3\xe2\xc5\x1e\xb7\xc1\x6a\x2b\x29\x64\x82\xc4\x26\x22\xb7\x67\x8a\x5d\xb6\x9f\x2d\x39\xb5\x5b\xd7\xa3\x3a\xd8\x21\x2e\xe6\xcb\xf7\x8b\xd3\xe6\xf6\x90\xff\x1d\xd5\x6b\xbf\x28\x3e\xfb\xa5\x12\x75\x76\x59\xfe\x99\xe8\xb1\x9f\x97\x3e\x9f\x2d\xe7\x37\xcb\xe7\xdb\xb5\xba\xed\x43\x17\x42\xd2\xf6\x1c\xd7\xda\xd3\xa2\x30\x41\xd1\x75\x5c\xeb\x31\x7a\x61\xd5\x61\x88\xf2\x0a\x7a\x67\xc9\xd3\x8c\x1a\xb8\xc5\xd5\xfd\xea\x46\x53\x93\x6f\xef\x7e\x65\x5d\xcb\xa6\x8d\x7e\xbf\x0f\x9f\x80\xe5\xbd\x60\xe1\xaf\xc9\xee\x33\x9e\x54\x81\x85\x20\x48\xe5\x72\x78\xc8\xb0\x0a\x74\x8b\xc8\x3f\xb6\x71\x4f\x5c\x9e\x08\x6c\xe3\x9d\x4c\x13\xeb\x64\x30\xce\x49\x19\xb1\x84\xac\xc5\x69\xc7\x38\x29\xbd\x0b\x08\x12\x9f\x33\xbe\x89\xfb\xd8\x26\x43\x71\x4d\xec\x42\x52\xe8\x63\x9a\x74\xa3\x93\x10\x44\x57\xf1\x4c\xfa\x22\x93\x10\x84\x9e\xdd\x03\x73\x1e\x95\xc4\x7d\x44\x92\xc1\x68\x24\xb4\x73\x6c\x7f\x24\x92\xa2\x11\x28\x43\x55\x13\x85\x44\x1b\x4f\xc4\x5a\x76\x37\x96\x08\x45\x24\xcd\x8f\xd2\x45\xe4\x11\x97\xab\xb9\x83\x88\x23\x5f\xed\x70\xe0\x28\xda\xc8\x54\x91\x46\xa6\x88\x32\x32\x41\x84\x91\xa1\xe8\x22\x34\xa7\xb9\xde\xc8\x22\x65\x8c\x10\x82\
xd0\x6e\x54\x91\x56\x7c\x10\xda\x4e\x5c\xc6\x57\xd0\xc7\x06\x21\x2d\x8e\x72\x28\xf6\xc7\x05\x21\xeb\x98\xdc\x3b\x8d\x0d\x38\x8c\x01\x23\xb9\xe2\xf5\xc4\x02\xb1\x71\x18\xd3\xc7\x01\xa9\x22\x7a\x50\x8e\x1a\x9d\x18\x20\xfd\xd1\x3c\x28\x63\x35\x19\x8a\xe4\x61\x17\x38\x52\x13\xc5\xa3\x16\x8f\x83\xa6\xe6\x99\x24\x82\xc7\x70\xf4\x0e\xfa\x12\x38\x91\x27\xda\x74\x5e\x68\x03\x11\x3b\x20\x8a\x89\x5d\x36\x4d\xb4\x8e\x49\x22\x75\x0c\x47\xe9\xb0\x50\xf9\xf5\x46\xe8\xa8\xc7\xda\xa0\xcb\x6d\x47\xe7\x68\xc5\xd9\xa0\x0b\x56\x91\x39\x34\x31\x36\x2c\x45\xe6\x51\x39\xf4\xf1\x35\xe8\xb2\x9b\x11\x39\xf4\xb1\x35\x9c\x48\xff\xf1\x95\x4e\xfa\x2b\x1b\xe9\x9a\x48\x1c\x0e\x63\x6a\x50\x75\xb1\x36\xb1\x34\xec\xe2\x68\x58\xc5\xd0\xb0\x8a\x9f\x61\x15\x3b\xc3\x36\x6e\x86\x65\xcc\x0c\x17\xf1\x32\x9c\xc4\xca\x70\x14\x27\xc3\x51\x8c\x0c\x47\xf1\x31\x1c\xc5\xc6\x70\x14\x17\xc3\x65\x4c\x0c\x67\xf1\x30\x2c\x63\x61\xd8\xc4\xc1\x70\x1a\x03\x63\x38\xfe\x05\x8d\xa8\x99\x28\xf6\xc5\x34\x71\x2f\x06\x63\x5e\x10\xe1\xe6\xbe\x78\x17\x65\xe4\x0a\xa2\xa5\x5a\x17\xeb\xa2\x16\xb5\x82\x20\x55\x17\xe7\xc2\xd6\x9e\xdc\x89\x71\xd1\x8a\x56\x41\x53\x35\xd4\xe3\x5b\xf4\x45\xaa\xb0\xbb\xca\xba\x8b\x52\x31\x59\x84\x8a\xa1\xe8\x14\xb6\x97\xd8\x6e\x64\x8a\x2a\xc6\x04\x65\xbc\xf6\x45\xa5\xa0\xbb\xda\xf6\x45\xa4\xb0\x89\xd0\xd2\x17\x8d\xa2\x19\x9a\x81\x76\xd9\xee\x2c\x4f\xad\x98\x12\x94\x56\xed\x8f\x42\x91\xf7\x9e\x85\xa5\xae\x11\x81\xc2\x72\x15\xd0\x47\x9f\xa8\xc5\x91\x20\x6a\xb2\x5a\x8b\x73\x3b\x86\x04\x49\xf3\xd4\x88\x3a\xd1\x8e\x1f\x41\x19\x53\xda\xe9\xde\x8e\x1d\x41\x6a\x00\x27\xd1\xd6\x1d\xc5\x8c\x70\x13\x2f\x82\x1c\x2b\xc2\x32\xac\x82\x8b\x18\x11\xf6\xf1\x21\x5c\x1a\x8e\xac\xe3\x42\x7c\x35\xb3\x91\x93\x98\x10\xd3\xc4\x83\x18\x8a\x05\x41\xb3\xc5\xf4\xc6\x81\x28\x23\x3a\x50\x2d\xf5\xed\x18\x10\xcd\x68\x0e\x24\xa9\xf5\xf8\x0f\x9a\x48\x0e\x94\xb3\x71\xd1\x74\xfa\x28\x0e\x64\xab\xde\x04\x11\x1c\x26\x88\xde\x30\x10\xb9\x81\x1a\xf8\xbc\x2f\x6a\x03\x3d\xe6\x7b\x7f\xc4\x86\x9a\x09\x8d\xa2\xcb\x6e\x47\x6b\xe8\x98\xd0\x48\xde\x2f\x2d\
xa3\x5b\xd3\x84\x46\xd9\xbe\xdb\x46\xb7\xb6\xf9\x8c\x78\x74\xd1\x87\xd1\x9f\x59\x44\xc3\xa8\xa2\x33\x68\xe3\x2c\x38\x34\x16\x76\x63\x2c\x90\x6f\x1a\xce\xe3\x2b\x0c\x59\xb3\x6a\xb6\x29\x9a\x3f\x51\xff\x59\xce\x2a\x81\x4e\xaf\x35\xab\x68\x18\xeb\x63\x7c\x19\x4f\xa1\x1d\x19\x81\x64\x2f\xd2\xc5\x52\xd0\x44\x45\xa0\xdc\x95\x4b\x43\x96\xa3\x88\x08\x54\x13\x80\x45\x24\x04\xbb\x28\x08\x56\x11\x10\x5c\x44\x3f\x70\x10\xf9\xc0\x26\xea\x81\x4d\xc4\x03\xab\x68\x07\x76\x91\x0e\x6c\xa2\x1c\x58\x46\x38\x70\x10\xdd\xc0\x45\x64\x03\x37\x51\x0d\xdc\x44\x34\x70\x13\xcd\xc0\x4d\x24\x03\x37\x51\x0c\xdc\x45\x30\xb0\x8a\x5e\x90\x35\xef\x74\xe7\x2c\xda\x1c\xd8\x86\x9b\xaf\xb0\xa4\x2b\x6e\xeb\x6a\xbb\xd4\x57\xc2\x58\x5c\xf3\x6e\xa8\xc0\xfc\xd2\x0f\xf0\x9e\x67\x4d\xf2\xcb\x74\xcf\x39\x44\x41\xb6\x78\xe0\x49\x12\xac\x9f\xb9\x3d\x3e\xd4\xde\x4c\x6b\x04\x71\x6c\x16\xf5\x17\x07\x3c\x79\x20\xcb\x75\xa8\xf2\x28\x86\x18\x61\xb2\xf1\x9a\xb8\x7c\xee\x83\x1c\x29\xe5\x12\xda\x4b\x5a\xd6\x4a\xba\xb3\xc7\xf9\x17\x02\x8b\x9e\xd4\x5f\x4b\xdb\x02\x5b\x49\xf8\x2d\xc2\x29\x26\x8b\x8a\x2a\xaf\x75\xf1\xf9\x8a\x82\x15\x87\x8d\x52\x4f\xc9\x30\x12\x25\x2e\x70\x2b\xea\x75\x6b\x1e\xc0\x22\xe1\xf7\x9f\x1e\x7e\x78\x99\xf0\x34\x7b\xf9\xf0\xc3\xcb\x22\x5c\xc4\x89\x3a\xba\x9f\xe6\x5d\x14\xe3\xe2\x5b\xe4\x2a\xc1\x08\x6e\x2f\x5a\x62\x6e\x87\xc7\x72\xb6\x4d\x78\xba\x8d\x0d\x4c\x98\xe6\x06\xcb\xf6\xd4\x2d\x5f\x31\x52\xae\x35\x3c\xf3\x52\x10\x06\x91\xb8\x33\x24\xec\x31\x82\x6d\x9c\x04\xff\x2b\xc6\xa9\xb8\xe1\x2b\x03\xc3\x68\x2b\x89\x5e\x5a\x6d\x59\x92\x8d\xcd\x6a\x92\x56\x94\xa2\x03\x35\xd4\x78\xa2\x50\x33\xb2\xea\xce\xb8\x7b\xb0\xb4\xd4\x2a\x0e\x4d\x33\xec\xa3\x57\xc4\xb6\xc7\xbc\x78\x15\xae\x20\xbd\x99\xe4\xcb\x8c\x15\x94\x5d\x4f\xf6\x34\x63\x19\x57\xad\x23\xb5\x41\x52\xa7\x5f\xce\x43\xd3\xad\x47\xd6\xa2\xb0\x28\x48\xd8\x5d\xdd\xa1\x19\xe4\x15\x46\x27\xdd\x2a\x92\x6c\xbd\x5d\x9c\x2f\xae\x9b\xd0\xee\x4f\xd7\xf3\x5f\x8f\xe0\xcd\xf9\x87\xb9\xfc\xf3\xdc\xd8\x85\xef\xd7\xf9\xf9\xf9\xe2\xe3\x11\x2c\xae\xc5\x81\x43\xfa\x79\x99\xb9\
x74\x99\xdf\xe1\x8e\xbb\x15\x36\x2c\x27\xbe\xca\xf0\xa7\xe2\xd3\xcd\xa5\xce\xcd\x4e\x68\xc7\x79\xf3\x18\xfe\x58\xb5\xa1\xe1\x8f\xaf\x8d\x5a\x01\x19\x3a\xc6\x76\x9a\xe2\x42\xc4\x38\x98\xaa\xf8\x00\x2f\xdd\xe9\x5a\xb6\x51\x3e\x59\x39\xac\x0e\x89\xb4\x67\xa3\xe7\x6c\x59\x9d\x49\xe6\x6d\x4f\x3c\x96\xd9\x9b\xc5\xdf\xe6\x47\xf0\x66\x7e\xbe\xf8\xe8\x7e\xee\xd1\x03\xaf\x1c\xab\x9a\x99\x4e\xc0\xb9\xd9\x44\x09\xd9\x1d\x0f\x9f\x67\x34\x9f\x8b\x57\xe1\x07\xd5\x4c\xd5\xb1\x1c\x4d\xa8\x51\x24\xfb\x1a\xf3\x79\xd1\x61\x77\x67\xa8\x53\x2d\x62\xc2\xac\xe3\x83\xe9\xe0\x6b\xb5\xc8\xdf\x44\xed\xf0\x2d\xb2\xac\xd3\x4e\xcd\x26\xa9\xfe\xcd\x70\x32\xa4\xdb\xf8\x10\xae\x25\x07\xab\xac\xec\x05\x50\x14\xb1\x2c\x78\xe0\x90\xae\x58\xc8\x71\x86\x11\x75\x30\x1f\x39\x46\x07\x3b\x9e\x6e\x83\xfb\xec\xf4\x90\x18\x05\x15\x40\x0c\x3e\xcd\x4d\xb8\xf1\x2a\xdc\xa9\x9a\xc3\x3a\x2f\x57\xc3\xa7\xd3\x7d\xc8\x9e\x80\xe5\x31\x45\x82\xd4\x60\x85\x56\x27\x6a\x98\xd5\xca\xa8\xbf\x83\x34\xd8\x1d\xc2\x8c\x45\x3c\x3e\xa4\xe1\x93\xe8\x91\xc7\xd4\x94\x07\xbb\x4f\xe2\x1d\x64\x8f\xb1\x10\x12\x84\x2c\x39\x0e\x79\xb4\xc9\xf2\xbb\xb9\xd2\x3a\xa7\xf0\x7b\x7e\xb2\x39\x39\x82\x47\xce\x3f\x1f\x8b\x1b\xe2\xb1\xf8\xd3\xa8\x68\xd5\x8d\xe9\x1f\x4e\x9a\x8d\x50\xa8\xb0\xf7\x71\x1a\x88\x11\xa2\x40\x90\x40\x06\xbe\x1e\x95\x19\x47\x61\xdb\xa0\x25\xdb\x20\xe7\x97\xe4\xb5\x5a\xba\xfd\xc5\xf7\x70\x7e\x76\x39\x87\x7d\x68\xe0\x9f\x23\xc6\xc6\xf0\x70\xfb\x32\xfb\x12\x18\x5e\xd8\x8c\xae\x0f\xad\x21\xf6\x8b\x10\x6f\x5a\x04\xbb\x3b\x9b\x08\xef\x8c\xd9\xea\xe6\xd1\x66\xd0\x7f\x01\xf6\x25\x18\x55\xd8\x60\x6e\x2e\xc6\x5b\x08\x72\x03\x21\x6e\x1f\x9d\xb6\x50\xdb\x47\xbe\x54\x9a\x7c\x3c\xa8\x55\x6f\xfa\x4f\xba\x11\xaf\xc1\x14\x23\x8d\x1d\xf9\x16\xc3\x03\x5d\xf7\x38\x27\x1a\x4c\xb5\xc7\x09\xbc\x79\x12\xbb\x04\x3b\x84\xd9\x11\x30\xa9\x67\x60\x66\x5b\xa5\xda\x45\xf2\x08\x14\x5d\x6e\x0a\x6e\xde\xce\xce\xe7\x8d\x63\x98\x91\x58\xb1\x44\xcc\xae\x8f\xe0\x7c\xf1\xd3\xf7\xaf\x4c\xce\x6a\xa6\x27\xb5\xe3\x6e\x8d\x8c\x4a\xa9\x0a\x99\xfd\x54\xd4\x79\xf0\x97\x4f\xd3\
x2e\x5a\xbf\x4e\xb9\x68\x99\x08\x47\x2c\x5a\xbf\xfa\x45\xeb\x3f\x67\xd1\xfa\xd5\x2f\x5a\x8d\xe7\x9b\x5d\xb4\xf2\x73\xf0\xa5\x18\x16\xb5\xe2\x3d\xc3\xab\xa3\xdb\x28\x0b\xd7\x7e\x34\x10\x36\x7b\x77\xc8\xd8\x5d\xc8\x8f\x60\x7b\xd8\xb1\xe8\x38\xe1\x6c\x2d\xfe\x1f\x84\xb8\x6a\x72\xf0\x8c\x6d\xf0\x95\x99\x67\xac\xfe\xaf\x42\xf6\x22\x0a\xbb\x0a\xed\x46\xf5\xfe\xfb\x56\xbc\xec\xbf\x6f\xcb\xc8\x4a\xe2\x86\x1a\xef\xb3\x60\x17\xa4\x59\xb0\x82\x55\x1c\x29\xcd\xc7\x4a\xf9\xdb\x27\x71\x08\x2c\x05\x06\x8f\x2d\x8b\x40\x16\xc3\x96\x87\x7b\xd8\x27\xfc\x81\x47\xcd\x0b\x01\x1c\xf6\x6b\x96\x71\x79\x22\x66\xb0\x8f\xc3\x60\xf5\xa4\x4e\xfd\xe2\x24\xff\x98\x04\xd2\xdb\xae\x03\x89\x48\x1f\x11\xc9\x54\xd7\x6b\x59\xb8\x88\x14\x77\xbc\x84\xa7\xfb\x38\x4a\x25\xb6\xf0\xdf\xb7\x3f\xf1\xac\x9c\xa4\xff\x7d\x7b\xd4\x09\x9e\x70\x48\x79\xa2\x5c\xc8\xf9\x97\x3d\x5f\x65\x6a\x79\x96\x0e\xe0\x5b\x96\xc9\x96\xaf\x04\xff\xfd\xc0\xd3\x4c\xc9\xfd\x20\xbf\xa0\x26\xba\xfd\xf1\x3c\x4a\x0f\x32\xf8\x92\x72\xc5\x0b\x92\xc2\x45\xab\x08\xd1\xd4\xda\x0d\x14\x21\xc3\x93\x54\xc2\x3c\xdd\xab\x69\xf9\x2a\xd1\xec\xf7\xc1\xe6\x50\xe4\xa8\x95\x36\x2c\x69\xf9\xc9\xaf\xbd\x51\xac\xee\x32\x2c\x15\xfd\xb7\x3e\x48\x9f\xd5\x75\x51\xbc\x21\x74\x95\xf0\x0e\x33\xf4\xe5\xf8\xf3\xe1\x8e\x27\x11\xcf\x78\x7a\x1c\xec\xf2\x11\xda\x1a\x35\x9b\x24\x58\xab\xd5\xae\x3b\x2e\x3b\xfb\x76\x87\x54\x2b\xca\xea\x7e\xd3\x5a\x58\xb5\x3f\x6e\x8c\xd8\xb7\x71\x24\x23\x68\x05\xa9\xb4\x4b\x45\x1b\xbe\x2e\x02\x47\xdc\xb1\x34\x58\x41\x28\x8b\xab\x9e\x48\xf8\xf1\x7d\x28\xae\xa2\x4c\x0c\xc8\x7d\x6b\xcd\x0f\x83\x54\x5a\x42\x83\x48\xa9\x41\x82\x38\x62\x21\xf0\x90\x2b\xc7\xd7\x30\xf8\x2c\xba\x4f\x2e\xeb\x32\x0e\x52\x10\x36\xad\xc4\xea\x23\x44\xff\x84\xc1\x2a\x6b\x6c\xd8\xc7\xb0\x8b\x53\x16\xac\x3a\xdf\x72\x0c\x49\xfc\xa8\xf9\xdb\x55\x6d\x47\xa9\xfd\x43\xdf\x71\x40\xfd\xbc\x73\x46\x18\x0a\x10\x3c\x14\xfe\xb7\x6b\x5a\x11\xd2\x5b\x3f\xea\xec\xef\x4a\xcf\x24\x5a\x30\xaf\x8e\x72\x68\x56\xc1\x4e\x65\x38\x9b\x80\x3f\xbe\x10\xf7\xe2\x75\xb6\x15\x1d\xa6\x62\xb6\x69\x94\x5e\
x67\xf7\x10\xef\x82\x2c\x53\x41\xb1\x52\x2e\x67\xdd\xff\xf2\x24\x16\xdb\x5d\xfa\x94\x66\x7c\x57\x6c\x80\xe5\x84\x92\xcb\xd5\xe3\x36\x08\xc5\x54\x8d\xd6\x5c\x06\x6a\x6b\x48\xce\xfb\x4e\xdf\x48\x3a\xa3\x66\xab\x19\xf2\x2d\x7d\xac\x19\x7a\x86\x8f\xb2\x5a\x27\xbc\x1a\xa6\xca\xdd\x7b\xcb\x8b\xe6\xea\x34\x43\x12\x3f\x1e\xcb\xf0\x21\xed\x16\x32\xb2\x52\x9a\x58\x25\x7b\xac\x90\x83\x27\xf0\xf1\xb9\xaa\x6f\xa8\xb1\xc3\xec\x5d\xc8\xa2\xcf\xfa\x43\xc1\xe8\x8d\xa0\xd5\x53\x6f\x84\xa8\xe1\x1f\x8e\x55\xbe\x5f\x44\x4b\x93\x2b\xab\x0d\xe9\x9e\xad\x7a\x14\x36\x03\xcb\x42\xf5\x1c\xc3\x17\x75\x52\xec\xf9\xd7\x74\x15\x27\x7c\xd5\x5e\xc0\xab\x7f\xcf\xf8\x97\xbe\xa2\x61\xbc\x49\xaf\x58\xa4\x3d\xb3\x97\xff\xe6\xa6\xdd\xcf\x07\x5e\x85\x68\x7b\x13\x31\x5f\xaf\x3d\xef\x7a\x47\xc6\xf8\x65\x6d\x2c\xfb\x8b\xd1\x35\x06\x97\xcd\xa5\x35\x5c\x55\x0d\xd4\x6a\xb4\xda\xc6\x71\xca\xd3\x7c\x85\x0e\xe3\x0d\xf0\x28\x53\xa9\x06\xe2\xfc\x60\x35\x74\x45\xbb\xe1\x1c\x7e\x9b\xad\x1f\x58\xb4\xe2\x6b\xd9\xf7\xf0\x7f\x0f\x2a\x24\x66\xbf\x27\x4c\x18\x6f\x36\x41\xb4\x79\xb9\x8e\x57\xe9\x4b\xb1\x21\xbc\x64\xb9\x84\xe3\xbf\xab\xc2\x7f\x18\x7a\xa7\x38\xc2\x36\x6b\x2a\x3e\x64\xc7\xb2\x55\x3d\xb0\xbb\x5c\x67\xab\x98\x36\xb3\x21\x2d\xae\x5c\x40\x8b\x72\x52\x90\x0c\x97\x18\xd6\xdf\xd2\x57\xa3\x82\x68\x14\x27\xfe\x81\x0b\xba\x89\xdb\x4c\x3b\xbe\x7b\x5d\xb2\x59\xdf\xca\x2d\x58\xfc\x5c\xec\xc0\x79\x33\x57\x89\x69\x54\xac\xbb\x50\x26\x95\x10\xb3\x7e\xa0\x45\xee\xe3\xe4\x04\xde\x2a\x03\x67\xf8\xa4\x14\xdc\x65\x46\x1b\xd1\xb2\xe9\x61\xbf\x8f\x93\x2c\x0f\x34\x59\x3a\x78\x0d\xdd\xa0\xd5\xde\xab\x36\xeb\x62\xe7\xce\x77\xbf\x6d\x9c\x66\x85\xfc\xfe\xae\x47\x3a\xe6\xe0\x5c\x72\x46\x9d\x71\x0c\xd5\x0b\x58\x9c\x50\xfd\x3e\xe1\x12\x8e\x5e\x0d\xeb\x78\x8e\xcb\xbe\x7c\x0d\x6f\xc5\xac\x2a\xfe\x77\xc7\x22\xb6\xe1\xc9\xcb\x2b\xd5\x84\x83\x97\x5e\x79\x53\x78\x2d\x87\x89\xe6\x77\xe5\xa2\xe8\x66\x37\xb8\x19\x5c\x63\x8d\x77\x83\x61\x31\xad\xe5\xad\xfc\x04\x48\x0f\xbb\x1d\x4b\x82\xff\x95\xc8\x5e\x2b\x32\xdb\x84\x1b\x35\x69\xe3\x18\xde\xa4\xa1\x0c\
x11\xdb\x57\xa5\x96\x7b\x2e\x71\x73\xda\xb0\xc3\x86\xff\x2d\xe0\x8f\x63\x2b\xd9\x88\x6a\xb6\x7d\xe1\x2b\xc4\x8e\x17\x30\x1e\x0a\xe3\x32\x1b\xe3\xe2\xa3\x58\x76\x14\xb8\x28\x2f\xdb\xe5\x20\xc9\x62\x69\x9b\x04\xa6\x3e\x7e\xcc\x47\xd0\x4c\x1b\x1b\xc6\x8f\x3c\x79\x13\x1f\xa2\x91\x88\xe0\xc6\xd6\x79\x84\x55\xbe\x73\x20\x2b\xaa\x32\x58\xaa\xab\xaa\x15\xe5\xe0\x4e\x14\xac\xfc\xd3\xea\x4d\x64\x10\xf0\xb9\x61\xce\xcf\xcd\xc2\x4a\x45\xc1\xc2\x47\xf6\x94\x8a\x4b\xdb\x26\xe1\x2c\x3f\x8e\x44\xe3\xac\x5a\x49\x32\x89\xea\x0c\x9d\x12\x0e\xfb\xfd\xb7\xd2\x03\x1f\xca\xaa\xe0\x7a\x40\x7e\xc2\x33\xf4\x40\xc8\x65\x6a\x1c\xd9\xfc\x46\xa9\x77\x47\x9a\x3f\xdd\xb3\xe4\xb3\x5c\x20\xdd\xaf\x24\x37\x0d\xd9\x0e\x97\x13\x43\xc1\xd8\x35\x45\xb6\xc5\xd8\x9a\x32\xbc\xb6\x83\xba\xad\x94\xf5\x5b\x3e\xed\xfb\x7b\xdd\x6c\x79\xda\x05\xd1\x0c\x93\xb8\xc0\xd8\xb4\xd2\xea\xad\x8b\xce\x7b\xe8\x6b\x50\x1c\xd5\xa3\x1f\xdf\x4b\x5d\x6c\xb4\x7a\x1a\xa7\x14\xaa\x91\x2f\xf5\x7d\xa2\x2e\x7c\x0d\x77\x4f\x39\xae\x58\xc6\x52\xda\x05\x51\xb0\x3b\xec\x8c\xd3\xd2\x2b\x3f\x94\x22\xdb\x9b\xf4\xdd\xab\x1f\x32\x54\xd2\x47\x78\x27\xe6\xd4\x17\x26\xde\x7c\x04\xc1\x18\x32\x5c\x06\x16\x08\x52\xd8\x1f\xee\xc2\x20\xdd\x72\xf1\xe9\x2b\x0e\xfc\x81\x27\x4f\xf0\xfd\x2b\x51\xcf\x43\xc6\x53\x08\x32\x78\x14\x33\x79\x44\x64\x14\x8b\xcb\xd1\x67\x51\xaf\x5c\x27\xae\x38\x17\x15\xe2\x25\xd8\xe4\xcd\xca\x32\x19\xad\x48\x09\x1f\x11\x59\x24\x70\x90\xa1\x8e\xeb\x91\x48\xe2\x7d\xae\xf2\x12\xc2\xf9\x97\x20\xcd\x52\x75\x77\x18\xe5\x29\x18\x6c\x83\x68\xd0\x35\xbf\x39\x01\xa6\x19\xaf\x37\x66\x93\x0c\x1c\xac\x30\xcd\x57\x8d\x9a\x04\x9b\xc6\xc0\xa6\x09\x49\xd4\x40\x2c\xf1\xb2\x85\xd4\x48\xcf\x17\xa0\xd1\x7e\x54\x7b\xca\xaa\xa8\x94\xce\x1c\x78\x35\xbb\xfe\xf9\xd3\xdb\xf7\xb3\xeb\xe5\xa7\xe5\xaf\x57\x28\xcb\xa0\x2a\x7b\x7e\x76\x39\x3f\xca\xff\xfc\x66\x76\x3d\x6c\x1b\x1c\xb7\x0a\x1e\x0f\x56\xc9\xa8\xa8\xa8\x91\xd1\x0f\xdf\xf4\x1a\x10\x4d\x80\x19\xc2\x9d\xdf\x80\xc2\xe8\x9a\x85\xab\xba\x54\xbe\x7c\x65\x30\xbb\xac\x44\x0d\x06\xe3\x15\x34\xf7\xaf\x4d\xf0\xc0\
xa3\x36\xef\xff\xe2\x45\xe9\xf2\x2c\x07\x87\x5c\xdb\x06\x44\xb2\x08\xd8\x2a\x13\xc7\x35\x75\x08\xf9\x72\xd4\x7a\x4b\xa0\xa2\x7f\xc0\x9a\x45\x1b\x9e\xe4\xb5\x1c\x5c\x22\xbf\x88\x32\xdd\xe3\x8a\x8c\x20\x51\x88\x29\x5b\x23\xb7\xbc\x25\xc1\x66\xc3\x93\xa1\xf5\xe7\x8e\x87\xf1\xa3\xcc\x6b\xd1\x3a\x8b\x8e\x4a\x1f\x6a\xd0\xfc\xbd\xc0\xee\xe2\x07\x7e\x02\x37\xca\x93\x31\x7c\x12\xbb\x80\xfa\x10\xf9\x2f\x2f\xe5\xdb\xa5\xf9\x34\x89\x86\x97\xab\x9e\x2f\xab\xcb\x29\x33\x68\xf0\x11\xf5\xa6\x7a\x8a\x1e\xc8\xdf\x9d\x77\xc1\x31\x1c\x22\xd9\xc8\x5f\x54\xc0\xe9\xfd\x21\x93\xbb\x4d\xad\xb3\x86\xd6\x69\x21\xe3\x04\x7e\x7f\x9a\x37\x58\x72\xd8\xed\xd3\xe2\x0d\x27\x7f\x00\x98\xa5\x12\xf0\x2f\xf6\x44\x99\x02\x67\x38\xe0\x03\xab\x0d\x1a\x69\xd8\x93\x6a\xbe\x38\x0c\xe3\x47\x19\x04\x30\x3e\x24\xf5\x09\x09\xff\x30\x71\x91\xfe\xeb\xab\x23\x19\x39\x23\xe3\x9b\x38\x79\x7a\x0d\x2f\x5e\x9c\xce\x2e\x7f\x9a\x5f\xbf\x78\x71\x54\x75\x9f\xf8\x6b\xe9\x9f\x2e\xfe\xf6\x9f\x47\x86\xa2\xff\xd2\x11\xfd\x71\x76\x7d\x79\x76\xf9\xd3\x90\x6c\x33\xd1\xdf\x9b\xd6\x5a\x39\xfa\x23\x24\xff\xd1\xb8\xd2\x95\x68\x19\x75\x65\xd0\xc9\x59\x6d\x23\x3d\x73\xf6\xfb\x57\xea\x04\x93\x5b\xdb\x0d\xc7\xac\x18\x85\xea\xa3\xd5\x58\x3b\x2a\x5e\xd2\x98\xbd\xdf\xbf\x82\xbb\x43\x56\xbd\x78\x40\x62\xbd\x4a\x7f\x7c\x05\x0c\xf2\x0f\x6f\x89\x17\xc7\x89\x55\x26\x3d\x7d\xb3\x47\x3e\x18\xb8\x4e\x48\x89\xd6\xf0\x17\xf1\x1f\x58\xfc\x3c\x54\xcf\xfa\xcb\xff\x32\xe4\x39\xd7\xf8\x1a\xf8\xab\xa6\x9e\xe2\x95\x9a\x57\x18\x7e\xb9\x94\x58\x6f\xd7\x93\xfe\x9d\xfa\x5b\xd0\xc4\x8e\x3a\x07\xe2\x4f\x68\x46\x0c\xa4\xa9\x3b\x9e\x11\xf7\x48\xf7\xff\x37\x61\x1d\x6d\x5a\xc0\x14\x6f\x44\x83\x8d\x86\x94\xa6\x11\x1a\x35\x00\x2b\x8e\xbe\xa0\x0b\x33\x52\x30\x45\x13\x2f\x36\x3c\x9a\x68\x04\x25\x1a\xe1\x88\x66\x20\xa2\x21\x82\x68\x08\x1f\x8e\x63\x87\xc6\xc0\x21\x7d\x7e\x98\x42\x86\x36\x73\x04\xc3\x15\x4e\x44\x14\x22\x59\x42\xe3\x94\x1e\x0e\x28\x42\xb3\xa9\x41\x21\x07\xcd\x98\x41\x13\x5a\xd0\xc8\x5f\x9a\x3e\x08\x4d\xfc\xa4\x5d\x50\x81\x46\x3c\x20\x82\x04\x44\x31\x80\x04\xfa\
x0f\xcf\xfd\xe5\x5a\xeb\xd1\x71\x6b\x44\xfc\x8d\xb0\x7c\x2d\x53\x9a\x5b\x05\xf6\xd2\xc0\x4e\x07\xb4\x75\xc9\x50\x72\x8f\x33\x72\x1e\xee\x5a\x8c\x3c\xa9\xba\xd4\x19\x4d\x07\x9a\x5f\x21\x7b\x5b\x0e\x37\x19\x5b\x7d\x5e\x27\xc1\x03\x4f\x0a\xd8\x0e\x66\x57\x67\xb6\x16\xb5\xac\x15\x43\xd6\x44\x01\x68\xc0\x8f\xf4\xf6\xce\x88\x4b\x0d\xb8\xe8\x22\xd4\xdb\x1a\xdd\xa6\x7e\x0f\x7b\x96\xb0\x1d\xcf\x78\x92\x12\xe2\x17\x8d\x5b\x1d\x40\xae\xa2\xf7\xe3\x75\x33\x47\x54\x18\x26\xe1\x36\x2d\xf2\x49\x9e\x0b\x19\x91\x55\xda\x79\x47\x62\x5e\xde\xe8\xd7\x3a\xd5\xa1\xf2\x5a\x3f\x62\x26\x61\xf1\x34\xd2\x79\x7d\x90\xe6\xa9\x52\x4d\x5f\x4b\x78\xb9\x3b\x84\x59\xd0\x76\x81\xd6\x3f\xf5\x4a\x88\x25\xa2\x0a\xe7\xff\x10\xf0\xc7\xb4\x58\x58\xfb\x3d\x2b\xea\x0f\x2e\x80\x0c\x29\xc5\xb6\x2d\xd5\x4f\x49\xa5\x6d\x90\x3e\xbb\x4a\x86\x6d\x24\x11\x06\x53\x66\xb7\x13\x60\x1b\x8a\x6c\xa5\xc9\x9e\x3c\xe9\xf5\x48\xa2\xeb\x46\xda\x6a\xd3\x4f\xe8\x4d\x6e\xdd\x93\xaa\xda\x50\x6e\x27\xa1\x75\x7f\x7a\x6a\x43\x89\x45\x12\x6b\x77\x29\xa9\x47\xd2\x50\x77\x93\x4a\x1b\xca\x6d\xa6\x9e\x76\x97\x48\xda\x38\x79\xb4\x79\xb2\xe7\xc1\x84\xd1\x98\x74\x08\xd4\x94\xcf\xd6\x81\xaa\x68\xa9\x9d\xa7\xdc\xa7\x2c\x32\x36\x1b\x65\x69\xae\xe5\x5c\x36\x9f\x8c\x9a\xcc\xcc\x03\x79\x96\x0d\xe5\xca\x6c\xcc\x63\xb9\x95\x9b\x99\x92\x8d\x27\x7b\x37\x9f\xf2\x40\x76\x64\x43\xa9\xb9\x4a\x77\x3c\x23\x72\x91\xdf\xd8\x74\xc2\xf7\x65\x41\xb6\x08\xc8\x3e\x12\x84\x9d\x94\xbe\x97\x45\xeb\xd1\xdc\xc5\x94\xba\x6a\xf2\x15\x8f\x64\x1f\x36\x5d\xa1\x5b\xe1\xd7\x87\x32\x0e\x9b\xce\x85\x4e\xd8\xf5\x9e\x2c\xc3\xe6\x7b\xe8\x78\x66\x61\x6c\x2c\x76\xc3\xf8\xeb\xd6\xb9\x81\x0d\xf3\x01\xcb\x56\x31\x14\xd9\x9f\x03\xb8\x93\xd1\xd7\x50\xa2\x36\xef\x6f\x5f\x16\x5f\xd3\x19\x3b\x12\x22\x9d\x9a\xb9\xd7\x28\x5b\x6f\x3d\xf7\x2e\x4e\xae\x26\x43\xaf\xab\x7c\xbb\xc6\x39\x76\xf5\x19\x73\x71\x6f\x69\xe6\xd5\xd5\x67\xc9\x25\x4b\x7c\xf5\x63\xb7\xe6\x55\x66\x5c\x9c\x58\x62\xfe\x5c\x4c\xc0\x37\x6c\x9e\x5c\x7c\x6e\x5c\x74\x3e\x5c\x74\x0e\x5c\x74\xde\x5b\x4a\xae\x5b\x42\x7e\x5b\x6a\x4e\x5b\
x72\x1e\x5b\x8b\xdc\xb5\x16\xf9\x6a\x2d\x72\xd4\x5a\xe4\xa5\xb5\xc8\x45\x6b\x9b\x7f\xd6\x2a\xe7\x2c\x21\xcf\x2c\x36\xb7\xac\x75\x3e\xd9\xd1\x1c\xb2\xcd\x8c\xb0\xa6\xc7\xa9\xf1\xcc\x2b\x76\x59\x60\x47\x33\xbf\xd6\xf3\xb8\x62\x94\x1e\xed\x6c\xaf\xbd\xb9\x5b\xb1\x27\xf6\x13\x57\xf9\x5a\x47\x73\xb4\xb6\x33\xae\x1a\xca\xed\xc9\xcb\xda\x97\x65\xd5\xf4\xfb\xcb\x5c\xac\x6e\x33\xab\x8e\x66\x53\xad\xe5\x46\x35\x17\xa9\xcf\xa0\xda\x9f\x0f\xd5\xb4\xcb\x64\xd6\xd4\xf1\x1c\xa8\xbd\x19\x4d\x0d\xdf\xd3\xc8\x7b\xea\x30\x8b\xe9\x78\xe6\x52\x74\xba\xd5\x9e\x6c\xa5\x3d\xb9\x47\x11\xed\x5c\x73\xa7\x74\x93\x6f\x74\x24\xc7\xa8\xf5\x30\xae\xe7\x15\xb5\xc9\x12\x6a\x91\x19\x94\x9e\x0d\x14\x95\x01\x94\x90\x3d\x83\x9a\xe9\x93\x96\xdd\xd3\x56\x47\x48\xca\xe2\x39\xa5\x86\x90\x9c\x9c\xd3\x20\x21\x67\x23\xbd\xa6\xf1\xde\xd5\x4e\xc2\xd9\x9f\x52\xd3\x54\x64\x9e\x78\xd3\x61\x1a\xcd\xd1\xd4\x99\xe8\x1c\xa2\xda\x74\x99\x83\xc9\x2f\x8d\xf5\x37\x79\x8a\xcc\xd1\x84\x97\xa2\x35\x0c\x85\x0e\x25\xb9\xec\xa4\xac\x44\xf4\x7c\x95\xd8\xd2\x69\x9a\xca\xf1\xd4\x94\x95\x8e\xd3\x50\xa4\x81\x26\xb4\x93\x5c\xd2\xfc\x40\x64\x90\x50\xb2\x96\x1e\x12\xd1\xc0\x1d\x2d\x66\x8f\x0e\xd6\xa1\x06\xb3\x4a\x03\x69\x7c\xca\x56\x0a\x4a\xc7\xa9\x1f\x0d\xd3\x3d\x62\xd2\x97\x0f\xa5\x78\xec\x26\x6c\x34\x9d\x5b\xba\xb4\x8e\x3d\x49\x1a\x4d\xc7\x7f\x27\x95\xa3\x9b\xc4\x8c\x18\xdd\x12\x32\x01\x23\x3e\xe9\x22\x3a\xd1\x22\x35\xb9\x22\x31\xa1\x22\x36\x89\x22\x36\x71\x22\x3a\x59\x22\x3e\x41\x22\x36\x29\x22\x21\x11\x22\x31\xf9\x21\x35\xe1\x21\x3d\xc9\x21\x3d\xb1\x21\x3d\x99\x21\x3d\x81\x21\x3d\x69\xa1\x5d\xa2\x42\x52\x72\xc2\xb1\x60\x5a\xc5\x83\x3a\x8c\xe3\x82\x6b\x15\xcf\x40\x8c\xd7\xdf\x2a\x17\x9a\xbc\xc6\xe6\x79\xe2\x1e\x7e\x78\xa9\x8a\x18\x25\x82\x53\x1e\x41\x6b\x1e\x65\xd2\xc9\xa8\xe6\xec\x5a\xe8\x0d\xca\x68\x50\xdd\x48\xa8\xfa\xa7\x0c\xfb\x94\xc5\x39\xd3\x3e\x52\x68\x1f\xac\x3e\xb7\xaf\x11\xd3\xfa\xfa\x5d\x69\xde\x68\x2e\xc3\xd5\x55\x89\x54\x8b\xe6\xa8\x61\xd1\x67\x31\x46\xee\x58\xda\xcc\xa4\x98\x8f\x1a\xd7\x9e\x75\xff\x92\x79\x9f\x9c\xf6\
x8f\x6d\x66\xa8\xf7\xea\xda\x5a\x44\x00\x49\xf2\x0e\xcc\x62\x48\x79\x68\x32\x92\xd5\x53\xef\x6a\x39\x85\xf7\x4c\x82\x7e\x49\x7c\xd8\xd4\x83\xda\xb9\x4e\x0a\xb5\x5c\x5c\x1d\xc1\x9b\xc5\x72\xb9\xb8\xf8\xb6\x72\x42\x2d\x17\x57\x86\xbf\x54\xb5\x37\xf8\x71\x74\xd8\x55\xbd\x8f\x19\xf0\xfa\xd8\xb9\xfa\x67\x28\xa2\xae\xfe\x69\xcd\x91\xcb\x7a\x2d\xf1\x03\x52\x8c\xc7\x1d\x8b\x9e\x9a\x23\x2a\x56\x54\x90\xb9\x8b\x68\xdf\xf0\x33\x28\x9e\x4f\x81\x0b\x9e\x6d\x9f\xcb\x57\xf7\xba\xfe\xca\x6f\x67\x6d\x69\x54\x8b\xb8\xbe\xdc\xe6\xcd\xf9\x69\x27\xa5\xdc\x6a\x4c\x15\x84\x25\x26\x88\x94\x2e\x42\x05\x77\x94\x66\xb9\x58\xda\x26\x32\x64\x9a\x2d\x15\x35\xb4\x11\x7e\x59\x79\xed\xed\x59\xee\x62\x47\x30\x49\xc5\x85\x4f\x6c\x5d\x63\xd0\x21\x3e\x2f\xe6\xcb\xf7\x8b\x53\x7c\x9e\x02\x28\x4a\x2a\x07\x97\xe2\x7f\x66\xbf\x54\x7f\x3e\xab\xfe\xfe\xe6\xc3\x05\x52\xac\xb8\x1b\xde\x2c\xdd\xaf\xa6\xdd\xef\xc5\x15\x44\xdc\xf5\xaa\x36\x41\x16\x30\xbe\xae\x56\xad\x8b\x2b\xa0\x1a\x77\xa4\x8c\x72\xb9\x66\xc9\xd3\xec\xb9\x58\x93\x1b\xcd\x1b\x9f\xff\xfc\x49\xaa\x45\x53\x37\xbf\xdf\x87\x4f\xc0\xf2\x16\xac\x1b\xa8\x81\xdd\x67\x3c\x81\xdb\xda\x5f\x99\x68\x94\xaa\xd5\xca\x43\x21\x1e\x0a\xa9\x3f\x1e\x0a\xf1\x50\x88\x87\x42\xbe\x49\x28\x44\xb7\x8f\x78\x3a\xc4\xd3\x21\x9e\x0e\xf1\x74\x88\xa7\x43\x3c\x1d\xe2\xe9\x10\xed\x5b\x3c\x1d\x52\x3c\x9e\x0e\xf1\x74\x48\xab\xa8\xa7\x43\xcc\xcb\x7b\x3a\xc4\xd3\x21\x9e\x0e\xf1\x74\x88\xa7\x43\x3c\x1d\xe2\xe9\x90\xaa\xfe\x9e\x0e\xf9\xe6\xe8\x10\x9d\xaa\xd0\x63\x22\x1e\x13\xf1\x98\x88\xc7\x44\x3c\x26\xe2\x31\x11\x8f\x89\xe0\xbe\xc5\x63\x22\xa6\xbf\xf7\x98\xc8\x48\x49\x8f\x89\xe4\x4f\x3b\x70\xf4\xb5\xd8\xe8\x9f\x27\x7a\xb4\x7c\x95\x99\x00\x57\x27\xf2\xf1\x57\x36\x8e\xd9\x57\x9a\x00\xd2\x0c\xe4\x59\xa8\x48\x73\x03\xd9\x63\x2c\x1b\x71\xa4\x7b\x1a\xa4\xc2\x88\x99\xd5\xdc\xd7\x6b\xcd\xa3\x78\x17\x44\x2c\x1b\xcf\xa5\x62\xe7\xc3\x77\x5a\xbd\xc8\xbc\xa8\xd3\x5e\xc3\xd4\xa0\xa3\x4b\xad\xb5\x53\x71\xdc\x93\xbd\x38\xbe\xed\x9a\x85\x01\x07\xd3\x50\xe0\x80\x77\xe5\xc3\xb8\x69\x02\xb6\x9b\xc1\x2e\x34\
x38\x3c\x47\x77\x63\x2b\x34\x45\xc8\x70\x98\x24\x6c\x38\xb8\x0e\x1d\x0e\xe8\xe1\x05\x54\x6f\x51\xa0\xe9\x91\x9c\x78\x8d\xc2\x44\x9e\xa3\x80\xf1\x1e\x45\xc8\xcc\xe2\x67\xf7\x20\x05\x8c\x17\x29\x42\xe6\x1d\x37\xf4\x24\xc5\xc9\xac\xa9\x0a\x7a\xbd\x49\x91\x12\x2b\xbf\xd3\x5e\x8f\x52\xcc\xb0\xe8\xf8\x9e\x0e\x78\x95\x62\x86\x86\xd6\xff\xb4\xd7\xb3\x14\x21\xb9\xcf\x07\xb5\xe3\x5d\x8a\xaa\x6d\xaf\x1f\x2a\xf1\x4a\xdf\x6d\x03\x94\xae\x9d\xee\x8f\x0a\x8e\xd6\x2f\xa2\x5f\x2a\x3c\xf3\xce\x69\xe1\xa2\x0a\x38\x37\x55\x44\xc7\xd7\x90\xa8\x61\x57\x55\xcc\x60\xaa\x39\xb5\x0e\xba\xab\x22\x64\xea\x1d\x5b\xb5\x2e\xab\x08\xa9\xfd\xce\xad\x4d\xb7\x55\xd4\x8a\x3f\xea\xe0\x8a\xd7\xe6\x82\x4c\x7c\x3a\xe6\xe4\x8a\xd6\xe8\x82\xb9\x56\x17\xb9\x42\xb9\x76\x76\x05\x84\xc3\x2b\x42\x64\x75\xd2\x1b\x74\x7a\x45\x7d\x3c\xeb\x77\x74\x5e\xa2\x1d\x1c\x64\x25\x3b\x2e\xb2\x06\xce\xaf\xa8\x11\x3b\x89\xf2\x19\xcc\x15\xd0\xb8\xc1\x35\xe0\x2e\xdb\x74\x84\xc5\x9c\x2d\x86\x5c\x66\xf5\xce\xb0\x08\xe9\x3d\x6e\xb3\x3a\x8d\x34\xa6\xdf\x86\x5c\x67\x1b\x4e\xb1\x98\x49\x66\xe0\x3e\x5b\x77\x8c\xc5\x8b\x76\xec\x42\x0b\x18\x37\x5a\xbc\xcc\xa6\xc3\xad\xde\x95\xd6\x4a\xea\x8f\xaf\x74\x52\x5f\x51\xa4\x6a\x1c\x6f\x2d\x5d\x6a\x01\x65\xf1\x00\x92\x6b\x2d\x90\xdc\x6b\x81\xe2\x62\x0b\x14\x37\x5b\xa0\xb8\xda\x02\xd1\xdd\x16\x68\x2e\xb7\xdd\x62\xe6\x5a\x7d\x4d\x59\x73\x63\x02\xd8\xb9\xdf\x82\x9d\x0b\x6e\x6f\x71\x33\x3d\x7f\x5f\x71\x43\x03\x43\x5f\x71\x43\x2b\x03\x38\x70\xc9\x05\x5b\xb7\x5c\xa0\xb9\xe6\x02\xc1\x3d\x17\x5c\xb8\xe8\x02\xca\x4d\x17\xb1\x84\x3e\xca\x73\x85\x81\xab\x2e\x42\xa6\xd6\xa9\xb7\xeb\xae\x4b\xbb\x59\xb5\x1d\x7b\x1b\x2e\xbb\x98\x8b\xd5\x98\x73\x6f\xcd\x6d\x17\x21\x76\xcc\xc1\xb7\x72\xdd\x45\xde\x2c\xdc\x39\xf9\x02\xc2\xd1\x17\x73\xd8\x93\x2e\xc1\x93\x39\xfb\x02\xc6\xe1\x17\x73\x54\xcf\x10\x4e\xbf\x08\xb9\x85\xdb\xea\x98\xe3\x2f\xe6\xca\x3a\xe6\x22\x4c\xf2\xc2\x05\x75\x15\x1a\x73\x13\xae\x1c\x80\x11\x72\xc7\x5d\x85\x0b\x27\x60\xe4\x6c\x18\x77\x17\x56\x8e\xc0\xd8\x1b\x96\x3b\x97\x61\x98\xda\x6d\x18\xdc\xba\x0e\x83\
x9d\xfb\x30\x58\xb9\x10\x03\xd6\x8d\x18\xa8\x2a\x54\xba\x3b\x31\x90\x5d\x8a\xc9\x95\x75\xe1\x5a\x0c\xcf\xac\xed\x25\x7b\x19\x03\xca\xd3\x18\x63\x91\x49\x8a\xdd\xdd\xc0\xdb\x18\x6b\xe3\x9a\xc0\xe3\x18\x10\x5e\xc7\xb8\x35\xde\xdc\xf3\x18\x21\x37\xf7\x51\x1e\xf3\x3e\xc6\x2a\xfe\x4a\x3f\xe5\x41\x0f\x64\x84\xd4\xb6\xaf\xf2\x90\x17\x32\xb5\xa7\xea\xc1\x19\x9a\x9e\xc8\x38\x93\x6d\xbf\x86\xbb\xa5\xb7\xc6\xe8\xfc\x4d\xfc\x96\x21\xc6\x6c\xef\x86\xbe\xcb\x28\xad\x3c\x8b\x36\xdc\xcc\x7f\x19\x67\xef\x68\xed\xec\x7d\x3e\xcc\x48\x4b\x6f\xae\x8f\x1e\x56\x25\x23\x64\xba\x8f\xba\x00\xe6\x91\x17\xb0\x9a\x5d\x9d\xe7\xb3\xde\xa7\x19\xa9\x91\xee\x8b\xc0\x40\x09\xbe\x00\x06\x01\x18\x26\xd3\x09\x22\x3d\xa1\x81\xe4\x0d\x0d\x14\x8f\x68\xb0\xf0\x8a\x06\xba\x67\x34\x10\xbc\xa3\x81\xe0\x21\x0d\x14\x2f\x69\x20\x79\x4a\x03\xc1\x5b\x1a\x68\x1e\xd3\x9d\x0a\x62\x35\x8e\x34\xcf\x69\xb0\xf2\x9e\x06\x2b\x0f\xea\xbe\xd2\x18\x5d\x23\xd5\x93\xba\xa7\x34\x4a\xcf\x69\xe3\x51\xad\x79\xbf\x59\xf0\x7d\x40\x04\xe0\x07\x07\xcc\xa3\x61\x48\x75\xf8\x56\x82\xf1\x03\x32\x20\xbf\xa1\x48\xb1\x75\xa1\x83\xf2\x47\x87\x9d\x38\x32\x4e\xed\x45\x7d\x59\xbc\xc6\xbc\xa0\xd3\xcb\xa2\xf9\xfb\x3b\x6a\xee\xb2\x85\xbc\xff\x74\xfb\xf9\x26\xfd\xa7\xcb\xae\xf6\xde\xd3\xde\x7b\xba\xfb\x78\xef\xe9\xd1\xc7\x7b\x4f\x7b\xef\x69\xef\x3d\x3d\xf4\xe2\x7f\x47\xef\x69\xdd\xbe\xe9\x7d\xa7\xbd\xef\xb4\xf7\x9d\xf6\xbe\xd3\xde\x77\xda\xfb\x4e\x7b\xdf\x69\xef\x3b\xed\x7d\xa7\xbb\xa5\xbc\xef\xb4\xf7\x9d\xf6\xbe\xd3\xde\x77\x7a\xe8\xf1\xbe\xd3\xde\x77\xda\xfb\x4e\x7b\xdf\x69\xef\x3b\xed\x7d\xa7\xf3\xc7\xfb\x4e\x7b\xdf\x69\x8c\x9c\xc9\x75\xbd\xde\x73\xda\x7b\x4e\x7b\xcf\x69\xef\x39\xed\x3d\xa7\xbd\xe7\xb4\xf7\x9c\xf6\x9e\xd3\xfd\x45\xbd\xe7\xb4\xf7\x9c\x26\x97\xf6\x9e\xd3\x23\x8f\xf7\x9c\x1e\xf9\xe6\x6f\xc4\x73\x7a\x1f\xac\x3e\xb7\x2f\x57\xd3\x3a\x51\x5f\x69\xde\x68\x2e\xc3\xe9\x05\x92\x54\x95\xe6\x10\x62\xd1\x67\x31\x60\xee\x98\x74\xe7\xa9\x7b\xb3\x4a\x69\xe3\x7b\x3b\xce\x61\x75\x1d\x24\x7c\x85\xf5\x85\xa6\xcf\xac\xd3\xe2\x75\xb8\xc2\x93\x77\x52\x59\
x2f\x62\x9e\xf6\xf7\x2a\xd1\xd2\x21\xe5\xb9\x5b\xb4\xea\xc5\x2c\x86\x94\x87\xa6\x7e\xe3\xd0\x55\xab\xef\x59\x2a\xfe\x94\xc4\x87\xcd\x56\x4a\xce\x47\x41\xe9\x25\x60\x28\xb8\xf0\x25\x38\x3d\xbb\x9e\xab\xfd\xee\xc3\xe5\xcd\xd5\xfc\xed\xd9\xbb\xb3\xf9\xe9\x11\x2c\x17\x57\x47\xf0\x66\xb1\x5c\x2e\x2e\xdc\x67\xb7\xd5\xbe\xd3\xb0\xec\x72\x71\x65\xf8\x4b\x55\x7b\x83\x1f\x47\x87\x5d\xd5\xfb\x98\x51\x1f\x44\x19\xdf\x18\x6e\x0b\xe2\xc6\xc4\x32\x59\xe6\xcf\x7f\xa2\x4c\x94\xcb\x7a\x2d\xf1\x03\x52\x8c\xc7\x1d\x8b\x9e\x9a\x23\x4a\x5e\xc4\x31\xea\xa6\xbe\xe1\x67\x50\x3c\x9f\x02\x17\x3c\xdb\x9a\xfa\xc0\xdb\x2e\x2e\xd7\xf5\x57\x7e\x63\x0b\x4c\xa3\x6e\xc4\x45\xe6\x36\x6f\xd3\x4f\x3b\x29\xe5\x56\x63\x3d\x22\xac\x33\x41\xa4\x54\x2f\x3c\xca\xc2\x27\x65\x73\x8d\xa5\xa9\xa8\xf4\xf4\x34\x14\xfa\xb8\x0d\x56\x5b\xe9\x00\x2f\x2e\xb6\x35\x77\xd4\x3d\xcb\xfd\x46\x09\x29\x55\xe3\xc2\x45\xbc\xae\xea\xe8\xb8\x48\x5d\xcc\x97\xef\x17\xa7\x8d\x35\xcd\xf0\x05\x79\x49\xe5\x25\x55\xfc\xcf\xec\x97\xea\xcf\x67\xd5\xdf\x23\xbc\xa4\xf2\x12\xe2\x0a\x7a\xb3\x74\xbf\xa4\x76\xbf\x17\x57\x10\x91\xa2\xa8\x6a\x13\x64\x01\xe3\x2c\x4b\x55\xeb\xe2\x0a\xa8\xc6\x1d\x29\x93\x6a\x52\x2e\x4e\x7b\x1c\xd5\x25\x79\xfc\x4a\xc7\x51\x52\x55\x9a\x36\x8b\xfd\x3e\x7c\x02\x96\x37\x63\xc3\x99\x80\xdd\x67\x12\xdd\x30\x59\x1e\x54\x86\x9c\x20\x95\xab\xc1\x21\x33\x51\x51\x21\xa1\x3e\x0a\x73\x65\xbb\xdf\x50\x58\xab\x29\x38\x2b\x63\xc6\xca\x7c\xe3\x7f\x6e\xbe\xca\x98\xad\x32\x67\x22\xcc\xb8\x2a\xbc\x49\x63\x88\xa9\x6a\x91\x52\x86\x12\xfb\x78\x2a\x0d\x25\x65\x28\x71\x10\x26\xd2\x11\x52\x86\x72\xfb\x38\xaa\x5e\x3a\xca\xb4\xfb\x5b\x0c\xd5\x10\x19\x65\xda\xa8\x2d\x03\x01\xd9\x91\x80\x4a\x44\xd9\xae\x2e\x44\x12\xea\xf9\x76\x12\x0b\x0a\x0a\x41\x40\x61\xe0\x50\x13\xfa\xa9\xc1\x34\x19\x9f\x6c\xfb\xc9\xa7\x1e\x9e\xc9\x78\xea\x77\xa9\xa7\x01\x96\x09\x75\xdb\x2f\xea\x64\xc0\x31\x99\x4e\xff\xca\x1a\x3c\xce\x30\x19\xca\xec\xb3\x04\x6b\xed\xbb\x08\xc5\xe5\x14\xec\x92\x29\xb7\x84\x76\xaf\x1b\x64\x96\x5a\x24\x12\xfe\x8a\xd7\x9b\x19\xb8\xa0\x90\x4c\x9b\x75\
x94\x55\x2a\x0d\xc7\xe6\x13\xd6\x84\x53\x72\x91\xcc\xdf\xc4\x60\x8c\xb1\x98\x37\xf8\xa4\x61\xea\xc8\x50\xa2\x96\x4d\xea\x23\x8e\x4c\x67\xec\x88\xa5\xb8\x73\x95\x36\x9d\x09\x35\x26\xc9\x88\x34\xc2\xc9\x15\x37\xed\x89\x28\x23\x63\xc2\x48\xcf\x0d\xe1\xde\xd2\xa4\x8b\xf4\xcc\x10\x59\xe2\xab\x1f\xbb\x35\xaf\x78\x21\x9c\x58\x22\x55\x84\xd1\x59\x60\x69\x22\x3c\x49\x84\xa6\x88\xd0\x04\x11\x9a\x1e\xa2\x90\x43\x04\x6a\x88\x4a\x0c\x91\x69\x21\x0b\x52\xc8\x82\x12\xb2\x20\x84\x2c\xe8\x20\x0b\x32\xc8\x96\x0a\xb2\x22\x82\x08\x34\x10\x96\x04\xb2\xa6\x80\xcc\x09\x20\xc9\xf5\x98\x1e\xa7\x0c\xe8\x1f\x3d\xd3\x63\xaa\x5b\x68\x93\x3f\x43\x3c\x0f\x46\x05\x92\x53\x3f\xe3\x2c\x0f\xf6\xc4\x7e\x32\xca\xf1\x60\xfc\x0c\x9a\x80\x82\x13\x86\xc7\x94\xdf\xc9\xa9\x1c\xd3\xef\x9f\x88\xdd\x31\xe6\x76\xe0\xcc\xf4\x40\x69\xce\xec\x94\x9e\x1c\xa6\x5d\x36\xcc\xeb\x8c\x53\x38\x86\xef\x69\x62\x30\x06\x04\x8e\xe9\xbd\xa3\xe0\x74\x0c\xe8\x1b\xc4\x0d\xb1\xb8\x63\x8f\x93\x37\x88\x76\x3e\x72\x4d\xdd\x4c\x4a\xdc\x38\xa4\x6d\x2c\x48\x1b\x3a\x65\x83\x22\x6c\x08\xc0\x0a\x95\xac\xa1\x51\x35\xb6\x1a\x43\x12\x4d\xf3\x7c\xfa\x42\x32\x49\x63\x4e\xd1\x28\x36\xc6\x78\x27\x33\x22\x68\x30\x93\x75\x02\x7a\xc6\x94\x9c\x81\xc0\xf4\x4c\x60\x4e\xcd\x14\x2c\x8c\xb1\x36\x67\x90\x98\x69\x71\x30\x86\x42\x87\x68\x99\x0e\x03\x83\xe8\xf9\x1e\x52\xa6\x97\x7f\x31\x14\xdd\xa4\x64\x9c\xc4\x76\x32\x8c\xeb\x44\x61\x49\x86\x74\xa2\x4d\xe6\x25\x27\x59\x10\x0d\x3c\xca\xbb\xe4\x6f\x77\xa8\xcf\xac\x08\x16\xe3\x33\xf7\x24\x01\x93\x0c\x83\x25\x61\x02\xfb\x99\x05\x4a\x2a\xa8\x15\xd3\xb9\xa5\x63\x5b\x7a\x88\x15\xd3\xf1\xdf\xe1\x5a\xdc\xd0\x2a\x18\x4d\x13\x92\x52\xc1\x13\x2a\x68\x3a\x85\x4a\xa6\x10\xa9\x14\x2c\x91\x82\xa5\x51\xd0\x24\x0a\x9e\x42\xc1\x12\x28\x04\xfa\x84\x48\x9e\x50\xa9\x13\x3a\x71\x42\xa7\x4d\xe8\xa4\x09\x9d\x32\xa1\x13\x26\x76\x74\x09\x9a\x2c\xc9\x9a\xe7\xd8\x73\x16\x6d\x0e\x6c\xc3\x87\x57\x19\xe3\x33\x7a\xeb\x6c\xbe\xd4\xbf\x6c\x50\x44\xf3\xdc\xab\xf0\x8b\xd2\xaf\xf2\x9e\x67\xab\xad\xa9\x11\xf3\x10\x05\xd9\xe2\x81\x27\x49\xb0\x9e\xe8\
xfb\x3e\xd4\xde\x60\xfe\x51\xe2\x28\x24\xea\x26\x8e\x02\x72\x1b\xcf\x55\x11\x4a\x7d\x20\x3f\x31\xa7\x20\x46\x86\x40\xed\x42\x1c\xc5\x91\xba\x81\xe6\xd7\x59\x29\x5f\xba\x0e\xc5\x79\xfd\x80\x45\x4f\xf2\xaf\x47\x84\x4a\x55\x17\x5b\x49\x0f\xd6\x48\x22\x18\x79\x85\x94\xef\x8f\xa8\xbc\xf2\xf3\x16\xdb\x59\xa1\x34\x18\x91\xc9\xd4\x2f\x7f\xbb\x15\xef\xbf\x35\x07\x82\x12\x7e\xff\xe9\xe1\x87\x97\x09\x4f\xb3\x97\x0f\x3f\xbc\x2c\xb0\x9c\x13\x75\xc8\x3a\xcd\x1b\x35\x1e\xe7\x85\xf2\xdb\x7d\x04\xb7\x17\xad\xa2\x5a\x43\x65\xc6\xbf\x64\xfa\x11\x33\xea\x23\xd9\x9e\x01\xfc\xcb\xc8\xef\xc6\x6e\xa0\xbd\x12\x5a\x13\x25\x61\x8f\xf9\x08\x16\xc7\xce\x1d\x4b\x3e\xaf\xe3\xc7\x08\xd6\x41\xba\x0f\x99\x52\xa6\xf2\x2f\x99\x38\xfa\x88\xe1\x26\x2e\x69\x03\xf5\x5a\xc5\xd1\x7d\x18\xac\xb2\x1e\x35\xc3\x31\x7c\x79\x7a\xbb\x65\x89\xbe\x62\xc7\x90\x16\xb7\xe6\x9e\x7f\xbf\x0b\x59\xf4\xb9\xe7\xdf\xc2\x78\x93\x5e\xb1\x88\xeb\x43\xc9\x8c\x3b\x48\xe6\x5f\xd7\x3f\xe1\x8d\x26\x7b\xdb\x2d\x49\x09\xed\xfd\x79\x67\x7e\x8b\xb6\x2e\xaa\x92\xdf\x32\xf3\x9e\xe8\x3f\xfb\xe5\xf8\x84\xd3\x8a\xbf\x93\x32\xc7\x7f\x6d\x32\x0a\x95\xac\x41\x05\x86\x86\x0f\x6a\x37\x46\x90\xe6\x1f\x9a\xd5\x2d\xf1\x03\xf3\xb7\x70\x77\x7f\xb7\xb8\xbe\x98\x2d\x9b\x08\xcf\xc5\xec\xfa\xe7\xd3\xc5\xc7\xcb\x23\xb8\x9e\x7d\xec\xb7\xde\x0e\x9f\xa3\x8f\x35\xa2\x07\x7e\x5c\xbc\x73\xe0\x27\xd7\xb3\x8f\xba\x55\x25\xc8\xc2\x9e\x8d\x68\xb4\x6b\x3b\x1b\x6b\xd6\xd3\x66\x8d\x1e\x58\xc8\xff\xb2\x50\x2d\xdb\xf2\xfd\xc5\xed\xf7\x51\xf6\xab\x6e\x28\xe6\x53\xdb\xcd\xfa\xf7\xcb\xc0\x3a\x61\x3c\xf8\x86\x84\xb4\x56\xc1\x95\xf8\x61\xcb\x1c\x31\x90\xf7\xc2\x70\xb5\x1b\x5b\xcf\xb2\xbe\x35\xda\x66\xa9\x1b\xce\x19\x73\x2c\xbf\xea\xa6\x2f\x58\x9e\xc1\x3a\x29\x5a\x4a\x0d\x90\x01\x65\xb2\x11\x13\xd0\x5e\x2c\x6b\x92\xed\x57\x9e\xbc\xf3\x8d\x84\x36\x06\xc3\xa9\x5a\x6f\x21\x56\x85\xa4\x2e\x52\x8c\x7c\xf9\xe1\xfd\xe7\x46\x33\x17\xfc\x5d\x3c\xd5\x99\xf2\x22\x1e\x39\x4b\xda\xb4\x9c\x10\x3e\xaa\x81\xee\xfa\xa9\xaa\x59\x25\xbe\x59\x87\x21\x2d\x4e\xe7\x18\x08\xe9\xed\xe2\x7c\x71\x7d\x04\
xbf\x7c\xba\x9e\xfd\x7a\x04\x37\xcb\xd9\xf2\x66\xd8\xef\x66\x5c\x0b\x72\xdc\xa9\xc4\xc8\xcf\x65\x1d\x46\x7e\x23\x2b\x38\xf2\x1b\x59\xfb\x9e\xdf\x14\xf3\x73\x6c\x72\x0d\x3b\x1e\xb4\x51\xe2\xa1\x49\x0f\xc3\x68\xbe\xbc\x6b\x94\xa7\x10\xe5\x28\x1b\xa4\x63\xf3\x01\x69\xe2\xc2\x99\xb5\x46\x4d\x59\x86\x4c\x12\x76\x4e\x8c\x36\xa3\x59\xca\xae\xe3\xf6\x75\x7a\xe0\xb7\xa6\x68\x4f\xc8\x37\x3c\x5a\x2f\xf9\x6e\x1f\xb2\x6c\x64\x8d\x41\x59\xcf\x5a\x23\xe9\xbc\xf1\x9e\x91\xb2\xcd\x25\x61\x26\x4d\x3e\xa2\x58\x71\xe3\x10\x6b\x6b\xc4\x76\xe2\x8f\xb7\xd5\xa5\x7f\x5c\x7d\xda\x49\x88\x21\xc7\x68\xee\xad\x10\xa4\x90\x6e\xe3\x43\xb8\x96\x2a\x6e\x53\x13\xa1\x34\x91\x48\x2a\x68\x1f\x87\xb9\xdf\x76\x7e\xe6\x91\x9a\xee\xdb\xff\xef\x1f\x21\xbb\xe3\xe1\x27\xd1\x0c\xff\xbc\x3d\x52\x04\xa7\x81\x58\xe9\x38\x9e\xc6\xe1\x03\x2f\x68\x18\x29\xe7\xc5\x8b\x54\x2d\x85\x27\x30\xbc\x8c\xed\x82\x68\x86\x03\xb6\xe8\x9d\x7b\xd1\x79\x17\xa6\x83\x9b\xc7\xc6\x30\x7e\xe4\x09\xdc\xc5\x07\x65\x86\xa9\xec\x59\xa3\x6d\x76\x2f\xe6\x10\x8f\x56\x4f\xf9\xde\x1b\xa4\x65\xff\x1e\x49\x57\x0c\x2e\xea\xc7\xd7\x70\xf7\x94\xab\xce\x9f\x4c\x7a\x58\x06\xf9\x08\xa2\x60\x77\xd8\xd5\x8c\x56\x4a\x63\x5f\xc4\x23\x90\x16\x38\x8c\x9f\x8f\xd2\x33\xbd\x8b\x13\xe0\x5f\x98\xa8\xda\x11\x04\xf7\x35\x14\x2c\x85\xfd\xe1\x2e\x0c\xd2\x2d\x17\xad\xb0\xe2\xc0\x1f\x86\xe7\xbb\x7a\xbe\x7f\x25\xaa\x7a\xc8\x78\x7a\x94\xdb\x87\x82\xe8\x93\x06\x7e\xab\xc6\xf9\x78\xb3\x2a\xd0\xaa\x92\x7c\x02\x67\x19\x3c\x4a\x01\x51\x9c\xc1\x8e\x7d\x16\x5f\x1d\xa5\xbc\xd4\x98\x8d\x8a\x54\x9e\x21\xc1\x26\xef\x5e\x96\x49\xc7\x21\x25\xbf\x44\xec\x46\x88\x81\x7d\x18\x67\x4b\x31\x62\x27\x1b\xd5\x57\xf9\x1b\x4c\x4b\x21\x37\x83\x42\xbc\x81\x89\x5e\x77\xcf\x2d\x47\x77\xd9\x97\xa2\x45\x32\x5e\x18\x2f\x47\x3b\x41\x6d\xc4\xdd\x03\xd6\xd5\xf9\x62\xf9\x69\xf9\xeb\x55\xf3\x94\x05\xe7\x67\x26\xa9\x31\x6e\x96\xb3\xb7\x3f\xcf\x4f\x3f\xcd\xae\xe7\xb3\xa3\xf2\xff\xde\xcc\xae\x8f\xe0\xfd\x7c\xb6\xbc\x98\x5d\x8d\x39\x3d\x9b\x18\xa1\x8e\xf5\x95\x1c\x2d\x25\xbe\x61\xf4\x47\xf5\x4f\x30\xfe\xf1\x9b\
xd9\x98\xd5\xe8\xb8\x68\x80\xe1\x63\x7c\x73\x83\x37\x1b\xdc\x46\x0c\xf5\xb0\xae\x7c\xa2\x31\x8e\x7b\x4b\xcf\x61\x32\x8f\x52\x2d\x16\x75\xb9\x6c\x4a\xe5\x22\x62\xad\x95\xd3\xe4\x3e\x89\x77\x72\x49\xbc\xc9\xd8\xea\xf3\x3a\x09\x1e\x78\x92\xc7\x6e\x4a\x61\x76\x75\x36\x1a\x72\x09\x81\x4a\x67\xe8\xd0\x4c\xe8\x04\xa8\xbd\xbd\x69\x1c\x56\xcb\x51\x97\x12\xde\xdc\xe8\x66\x55\x0a\xf6\x2c\x61\x3b\x9e\xf1\x24\x45\xdb\x5d\x8a\xc7\x3c\xe9\x2d\x22\xed\x2d\x3e\x37\x29\x3a\xf5\x2d\x21\xf9\xad\x75\xfa\xdb\xc9\xba\x1f\x5f\x11\x4c\xea\x5b\x43\x91\x50\x1e\xa2\x0c\x92\xdf\x1a\xcb\x2c\x92\xe4\x1a\xa4\xbf\x35\x96\x89\x4b\x93\x4b\x49\x94\x6b\x91\x2a\xd7\x3e\x56\xba\x5d\xba\xdc\xe9\x12\xe6\x4e\x93\x32\xf7\xab\x25\xcd\x9d\x26\x6d\xae\x61\xe2\x5c\x94\xc4\x2e\xd1\xac\x0b\xf3\x80\x12\xa9\x09\x09\xe1\x22\x79\xee\x74\xe9\x73\x0d\x13\xe8\xa2\x24\x8e\x07\x89\xc0\x4a\x34\x0b\x15\x81\x1b\xa4\x5d\x40\xd8\x32\xf3\x84\x4d\x22\x5d\x57\xab\x9b\x45\x32\xdd\xe7\xd8\x85\x2d\xd3\xe8\x9a\x87\x91\xc0\xae\x2c\xe3\xa9\x74\x71\xe3\xb5\x1b\x78\x42\x13\x52\x02\x25\x72\x2c\xfc\x04\x3e\xfb\x03\xb8\x0e\x42\xa1\x1e\xa3\x50\x14\xc8\xb5\x4f\x97\x7e\x97\x16\xcb\x47\x3d\x46\x09\x0a\x50\x12\xc7\x5c\xb5\x2d\x42\xff\x8f\x38\x6d\x97\x21\x2a\xd0\x7b\x9f\xc3\x70\x16\xea\x31\x0b\x6a\x81\x3c\x4b\x0c\xa6\xe2\x25\xa5\x3d\x80\x89\x92\xf1\x1a\x7a\x97\x23\xf7\xd2\xf1\x90\x18\x16\xbb\xa9\x93\xc0\x18\xea\x71\x1f\x1e\x43\x3d\x53\x04\xc9\x50\x8f\x71\xa8\x0c\xc2\xfa\xf7\xda\x2c\x60\x06\x4a\x72\x4f\x1a\xdf\x56\xd8\x0c\x8a\xc8\xde\x10\x1b\xba\xe0\x19\x94\x17\x38\x0c\xb4\xd1\x90\xeb\x30\xdc\x46\x9f\x5c\x27\x41\x37\x1a\xc2\x2d\x12\xfa\x62\x13\x78\x50\x93\xfa\x52\xd3\xfa\x12\x13\xfb\x12\x53\xfb\x12\x93\xfb\xd2\xd3\xfb\x92\x13\xfc\xda\xa5\xf8\xb5\x4c\xf2\x6b\x9d\xe6\xd7\x3a\xd1\xaf\x75\xaa\x5f\xeb\x64\xbf\xd6\xe9\x7e\xdd\x24\xfc\x75\x90\xf2\x97\x9c\xf4\x97\x96\xf6\xd7\x51\xe2\xdf\x89\x52\xff\x4e\x93\xfc\x77\x8a\xf4\xbf\x93\x24\x00\x9e\x2c\x05\xb0\x61\x12\x60\x94\xc4\x09\x42\x88\xa8\xc7\x2c\x90\x08\x52\x23\xfb\x0c\xa9\x80\xa7\x49\x06\x3c\x5d\x3a\xe0\
x49\x12\x02\x1b\xa6\x04\xa6\x5d\x70\x1c\x86\x24\x51\xcf\x04\x81\x49\x72\xc1\x26\xe1\x49\xe8\x7d\xd5\x1f\xa4\x04\x57\xcb\x66\x40\x93\xc1\x50\x25\xb8\x21\x3b\xc4\x4f\xd7\x03\x96\x58\xa8\xb6\xd1\x0a\x5d\xeb\x64\xc2\xb6\xe9\x84\x09\x09\x85\xc9\x8a\x64\xbb\xa4\xc2\x36\x69\x85\x5d\xe9\xbe\xc9\xa9\x85\x9f\x43\xf3\x6d\x95\x54\xd8\x38\x20\x0a\x56\x09\x6a\x96\x58\x18\xb7\xf6\xd6\x94\xe6\xfd\xc1\x51\x70\xfb\x7b\x3b\x90\x8a\x36\x44\x0a\x5a\xa5\xac\xfb\x6c\x9d\xae\x09\xdd\xa8\x7d\x41\x55\xaa\x70\x29\xd8\x85\x7c\x34\xb4\x0a\x26\x15\x8c\x7a\x8c\x02\xac\xe0\xda\x74\x30\x21\x71\xb3\xcf\x70\x7a\xba\xbe\x94\xc4\x75\x3d\x3e\x61\x90\xf6\x27\x25\x36\xf5\x5a\xac\x3f\xa3\x69\x89\xd1\x12\x9d\x07\x69\x51\x8f\xeb\xd4\xc4\x86\xc9\x89\x69\x76\x74\xd7\xe9\x89\xa7\x4a\x50\x3c\x45\x8a\xe2\x29\x93\x14\x4f\x93\xa6\xd8\x61\xa2\x62\x8a\xa6\x93\x90\xac\x98\x9a\xae\x98\x98\xb0\xd8\x2e\x65\xb1\x55\xd2\x62\x5a\xda\x62\x5a\xe2\x62\x62\xea\x62\x6a\xf2\x62\x5a\xfa\x62\x72\x02\x63\xab\x14\xc6\x76\x49\x8c\x6d\xd3\x18\xdb\x26\x32\xb6\x4d\x65\x6c\x9b\xcc\xd8\x36\x9d\xb1\x8b\x84\xc6\x16\x29\x8d\x71\x49\x8d\xed\x02\x58\xa2\x13\x1b\x7f\x43\xa9\x8d\x27\x49\x6e\x4c\x4c\x6f\xac\x1e\x5a\x92\xe3\xfc\x5b\xac\x3d\xaa\x49\x59\x86\x5b\x92\xdc\x5e\x6d\x2d\x6a\xe4\x38\xfb\xb1\x7a\x28\xbe\xc8\xc8\x4c\xc8\xea\x71\xa3\xab\xc0\x65\x45\x6e\x89\x98\xbe\x27\xf1\xc9\x91\xd5\x63\x98\x22\x19\x79\x5e\x0f\xf9\x2a\x1b\x4f\x94\x8c\x34\x19\x34\x93\x2a\x8f\xa4\x4b\xc6\xd9\x8e\xb0\xa9\x95\xd5\x83\x3f\xee\xd2\xd3\x2c\x17\x12\x4c\x93\x2d\x17\xbf\x37\x4e\xb9\xac\x1e\x42\xe2\x65\xf5\xe0\xd3\x2f\xab\x07\x9f\x84\x59\x3d\xd6\xa9\x98\xd5\x63\x9e\x90\x19\xab\x53\xb2\x48\xcb\xac\x1e\x42\x72\xe6\xfc\xe5\x4e\xd6\x3b\x7c\xa2\xe6\x96\x98\xe9\xd7\x3c\x5a\xbe\x66\xf5\x18\x67\x6d\xc6\xf6\xbb\x34\x09\x9a\xe4\x6e\x46\x6b\x80\x94\xfd\x75\x30\x83\x33\x5a\x66\x6b\xa4\x77\xf3\x38\xa3\x24\x9a\xe4\x7c\x6e\x64\x73\x46\x49\x37\xc8\xfc\x4c\xc9\xe9\xac\x1e\xfc\x5a\x4e\xcc\xef\xdc\x2a\x8e\xbe\x6d\x23\x73\x3d\xb7\x8b\x21\x15\x09\xc8\xbc\xcf\xad\x62\x46\xd9\x9f\xd5\x43\xcb\
x01\xad\x1e\xfb\xd3\x3a\x29\x09\x73\x4b\x92\xdb\xf5\xce\xa2\x46\xa8\xe4\xd0\xc6\xdd\x7a\x5b\x2b\x5a\x5f\x1f\x3d\x7a\xa8\x7d\x3c\x7a\x68\xf8\x78\xf4\xd0\xa3\x87\x1e\x3d\xf4\xe8\xe1\xc0\xbe\xe7\x19\x44\xcf\x20\x7a\x06\xd1\x33\x88\xe6\x8f\x67\x10\x3d\x83\xe8\x19\x44\xf5\x78\x06\xb1\x23\xd7\x33\x88\x63\x8f\x67\x10\x07\x8b\x79\x06\x11\x27\xc0\x33\x88\x9e\x41\xf4\x0c\xa2\x67\x10\x3d\x83\xe8\x19\x44\xcf\x20\x7a\x06\xd1\x33\x88\x06\x02\x3c\x83\x38\x49\x95\xbf\x79\x06\xd1\x69\x5e\x76\xf5\x78\x18\xd1\xc3\x88\x1e\x46\xf4\x30\xa2\x87\x11\x3d\x8c\xe8\x61\x44\xfd\xe3\x61\x44\xc3\xc2\x1e\x46\x6c\x97\xf2\x30\xa2\x55\x79\x0f\x23\x1a\xc1\x88\xed\xb4\x30\xd7\xe2\x50\xf3\xfc\xb9\x61\xe4\x6b\x31\x62\xdc\xde\x8e\x4c\x5f\xdf\xb8\xf2\x5c\x69\xd2\xc3\x30\x90\xa7\x42\xb8\xe3\xd9\x23\xe7\x66\x7e\x10\xd9\x63\xac\x21\xde\x8c\xb4\x16\x58\xff\xd7\x35\x8f\xe2\x5d\x10\xb1\x2c\x7e\x46\x6a\xf1\xb4\x7a\x29\x56\xc0\x04\xbd\x8c\xaf\x4d\xc7\x5a\x50\x6b\xc5\xe2\x98\x29\x7b\xdd\xf4\x20\x81\x49\x0a\x04\xb8\xc4\x40\x40\x75\x8a\xc6\x3b\xc8\x03\x6d\x70\x80\x8b\x44\x41\xf0\x7c\x83\x84\x56\xb9\x49\x12\x08\xc1\x44\x49\x84\x00\x91\x48\x88\x72\x07\xc3\x24\x13\x02\xe2\x00\x06\x3b\xcf\x7e\xb0\xd1\x56\x3a\xf4\xf0\x07\x33\x2f\x7f\xa4\xc4\xca\x0c\x91\x0e\x7a\xfa\xa3\xc5\xd6\xc9\x00\xbd\xb7\x3f\x5a\xe4\x33\xd0\x01\x60\x48\x08\xa0\x85\x16\x00\x5d\x3f\x25\x40\x68\x0e\xe5\x65\x3e\x46\x0a\xa0\x05\x2b\xa3\xe7\x10\x2d\x80\x16\xd9\xa2\x0b\x34\xc4\x00\x5a\x64\x3f\x61\x50\x5b\xf3\xf0\x9d\xdf\x4b\x19\x90\x94\x02\xea\x19\x20\x0d\xa8\x60\x04\x48\x85\x5b\x2f\x6d\x60\x29\xd9\x05\x71\x00\xd6\xd4\x01\x38\x5d\x79\xad\xe8\x03\xf8\x2a\xa7\x0a\x6b\x10\x01\x4c\x61\x04\xfc\x08\x89\x47\x80\x04\xda\xb2\xcb\x86\xa1\x04\xe2\x52\x16\xdf\x8f\x82\x09\xf8\x6d\xb3\xf0\xd8\x18\x86\x13\x88\xab\xfa\x7a\x02\x40\x01\x26\x81\x14\x60\x12\x50\x01\xcc\x60\x05\xfc\xa6\xd1\xb5\x71\x34\xed\x16\xc4\x71\x50\x03\x1c\x3a\xd0\x02\x75\x68\xb5\x21\x87\x66\x13\x50\x76\x0b\x2d\xe8\x50\x1b\x5b\xe4\x8f\xd7\xc3\x0e\x0a\x60\x40\x0b\x75\x0f\x3c\xc0\x24\xd0\x03\x18\x82\x0f\x94\
x33\x88\x73\xd3\x0c\xe8\xcc\x33\x1d\x00\xc2\xea\x10\x76\xa2\x87\x20\x68\xd5\xec\x42\x13\x35\x5b\x0d\x7e\xd9\x1b\x04\x27\x0a\x18\x02\x2d\xd6\x3d\x3c\x01\xd3\x00\x14\xf0\x1c\x10\x05\x4c\x08\x52\xc0\x84\x30\x85\x5e\xb6\x43\xa0\x02\x1c\x41\x15\x40\xb0\x32\x82\x05\x5c\x01\x16\x80\x05\xd0\x21\x0b\xa0\x83\x16\x40\x87\x2d\xc0\x0a\xb8\x00\x1b\xe8\xa2\x5b\x18\x6b\x25\xd3\x48\xc0\x1a\xea\xc0\x05\x80\x01\x2e\x20\x8c\x5e\x21\x18\xbb\x59\x9f\x10\x94\xf1\xae\x4f\x08\xca\x82\x07\xce\xa0\x0c\x70\x03\x66\x80\x0d\x9c\x01\x64\x40\x03\xdc\x41\x1a\x30\x1d\xa8\x01\x93\xc1\x1a\x60\x06\x6c\x50\x74\x35\xa3\xd0\x06\xfe\x60\xaf\x20\x8f\x71\x70\x03\x7f\x0d\x29\x41\x8f\x7e\x78\x83\x7a\x63\x92\x3a\xe7\x1e\x80\x03\x2d\x53\x07\x7c\x34\x20\x0e\xc2\x15\xa4\x07\xfa\x28\x41\x0e\x7c\x6b\x4e\x0a\x7e\x80\x09\xfc\x41\xbe\x34\x0e\x01\x20\xf8\x13\x7e\x0b\x18\xd1\x40\x20\x84\x99\xa5\x81\x46\xc6\x41\x10\xf4\x7b\x1a\x3c\x86\x16\x06\xc1\x37\x47\x03\x1e\x19\x02\x42\x28\x03\xa2\x59\xe1\x36\x14\x42\x9d\xbd\xcd\x89\xdb\x04\x43\xa8\x1a\xec\xee\xd2\x48\x44\x6a\xa0\xa3\xa1\xb4\x85\x49\x60\x32\xa0\x04\x5c\x40\x25\xe0\x02\x2c\x01\x07\x70\x09\xd0\x00\x13\xb0\x33\x1e\xd8\x82\x26\x60\x09\x9b\x80\x53\xdb\x87\x05\x74\x02\x5f\xc5\xf2\x61\xc9\x9f\x80\x19\x83\x82\x1e\x13\x5b\x15\xaf\xba\x9f\x43\xc1\xaf\x7d\x1d\x73\x47\x2f\x8b\x82\x96\xad\xd8\x95\x51\x1e\x05\x2d\x57\xf2\x2b\xd3\x31\x29\x30\x0d\x97\x02\x86\x6c\x0a\xed\xa0\xe8\x9e\x4f\x01\x23\x46\x85\x66\x5f\x92\x3c\x46\x2f\xa7\xf2\x2f\x62\xff\x21\xf3\x1f\x30\x16\xac\x6a\x46\xe2\x7e\x20\x0f\x58\xa5\x18\x97\x5e\x6e\xc5\x89\xcd\xaa\xcb\xae\x50\x4f\x5d\x95\xb5\x46\xc3\xaf\x50\x46\x42\xaf\x71\xa5\x66\x2c\x71\x7b\x50\xb2\x75\x74\xe8\x8d\x2c\x45\xb1\x88\x4c\xc1\xbe\xc0\x64\xfc\x0b\xb8\x65\x60\x80\xac\xa1\x26\xb1\x30\x60\xc1\xc3\x00\x9d\x89\x01\x6b\x2e\x06\x6c\xd9\x18\x20\xf3\x31\x40\x66\x64\x80\xce\xc9\x80\x05\x2b\x03\x64\x5e\x06\x6c\x98\x19\xb0\xe5\x66\xba\x02\x68\x2a\x79\x3b\x7e\x06\x1c\x30\x34\x7d\x32\xf0\x5a\x70\x3b\x96\xa6\x47\x06\x41\x1b\x6f\xcf\xd4\x68\xea\x82\x49\xf2\x05\xe8\x44\
x5f\xe0\x2c\x02\x01\x2a\x15\x13\x7c\x5b\x49\xbf\x60\xaa\xc4\x5f\x60\x97\xfc\x2b\x3a\xec\xc4\x61\xfa\x39\xd9\x99\xcb\xe2\x95\xd8\xe2\x13\x5c\xe2\xb1\x75\xe9\x18\x6f\xca\xf6\xf3\xd4\xcc\xbf\x25\x35\x53\x0e\x10\xcf\xcc\x78\x66\x66\xf4\xf1\xcc\x8c\x67\x66\x3c\x33\xe3\x99\x19\xcf\xcc\x78\x66\x06\x77\xa6\xf0\xc4\x8c\x27\x66\x3c\x31\xe3\x89\x19\x4f\xcc\x78\x62\xc6\x13\x33\x9e\x98\xf1\xc4\x8c\x27\x66\x3c\x31\x63\x5a\xd4\x13\x33\x9e\x98\xf1\xc4\x8c\xfe\xf1\xc4\xcc\xc0\xe3\x89\x19\x4f\xcc\x78\x62\xa6\xa8\xb4\x27\x66\x3c\x31\x53\x7f\x3c\x31\xe3\x89\x19\xbc\x10\x4f\xcc\x78\x62\xc6\xd8\xee\xe1\x79\x19\xcf\xcb\x78\x5e\xc6\xf3\x32\x9e\x97\xf1\xbc\x0c\x78\x5e\xc6\xf3\x32\xa3\x45\x3d\x2f\x43\x28\xe9\x79\x19\xa3\xc2\x9e\x97\xf1\xbc\xcc\x78\x5d\x3c\x2f\xf3\x9f\xcb\xcb\xec\x83\xd5\xe7\xf6\xe5\xf6\xf9\xd0\x99\x2b\xcd\xdb\xb1\x92\x26\xb8\xd8\x5b\x54\xab\x39\xe4\x58\xf4\x59\x0c\xb0\x3b\x26\x9d\x0d\x3b\xf9\x83\x4c\x4f\x28\x14\x70\x60\x1d\x24\x7c\x45\xa3\x5e\x6c\xe7\xe9\x69\xf1\x6a\x8a\x88\x67\xea\xce\xb2\x8e\x36\x09\x75\x5f\xbc\x57\x6a\x95\x43\xca\x73\x00\x46\xf5\x37\xd2\x6a\x95\xf2\x90\xaf\xb2\x4e\xb6\x7c\xd8\xb3\x54\xfc\x29\x89\x0f\x9b\x2d\xfa\x02\x96\x8f\xb0\x8e\xa7\x10\x9c\x9e\x5d\xcf\xd5\x8e\xfc\xe1\xf2\xe6\x6a\xfe\xf6\xec\xdd\xd9\xfc\x14\xb7\xde\x2c\x17\x57\x47\xf0\x66\xb1\x5c\x2e\x2e\x30\x4e\x20\xf8\xc4\x8c\xda\xba\xa2\x24\x2c\x17\x57\xa8\xdf\xab\xaf\x32\x2e\x12\x1d\x76\xd5\xc8\xc2\xcf\xb4\x20\xca\xf8\x06\xb5\xa5\x89\x1b\x2a\xcb\x64\xc9\x3f\xff\x89\x3e\x45\x2f\xeb\xf5\xa6\x0e\x7f\x31\xfa\x77\x2c\x7a\x6a\x8e\x5c\xa9\x52\xc1\xa7\x94\x6e\x0f\x76\xe4\x12\x09\xc5\xe4\xbb\xe0\xd9\x16\xc7\x48\xb9\x59\xf4\xae\xeb\xaf\xff\x86\x17\xbe\x46\x3d\xad\x16\xbf\xdb\xbc\xc5\x3f\xed\xa4\xac\xdb\x9a\xbd\x15\xdb\xf9\x52\x13\xd9\x54\x25\x2b\x25\x1a\x8f\xb2\xf0\x49\x79\x44\xc4\x68\xe3\x6a\xb6\x2d\x8c\xe8\x8f\xdb\x60\xb5\x2d\x31\xa6\x9a\xeb\xff\x9e\x25\x78\x99\xad\xe1\x9e\x63\x40\x54\x63\x5a\x67\x81\xbe\x98\x2f\xdf\x2f\x4e\x1b\xab\x73\xf1\x77\xd2\x43\x13\x25\xbd\x28\x38\xfb\xa5\x12\x72\x76\x59\xfe\x59\x7a\
x67\xe6\x7f\x3e\x9f\x2d\xe7\x37\xcb\x69\x17\xf4\xee\xb7\x51\x8a\xa3\x93\xc3\x56\xad\x40\x2a\x86\xcc\x7b\x5b\xb5\x2d\xa5\x98\xea\x06\xa3\x92\x8a\x68\x63\xc9\xd3\x0c\x0f\x39\xdb\x1f\xde\x6f\x34\x6f\xff\x06\x0e\xef\x16\xd5\x6a\x5a\xde\xf6\xfb\xf0\x09\x58\xde\xc8\x75\xc7\x21\x60\xf7\x98\x7b\x68\x49\xd1\x8b\x05\x52\xac\x39\x87\xcc\x5c\x39\x49\x42\xde\xe9\xbc\xb0\x9b\xbd\x90\xce\x09\x1b\x30\xc2\x15\xf3\x8b\xbb\x23\x0f\xf0\xc1\x0d\xde\x17\xbb\x77\x75\xd9\xe0\xe7\x60\x7d\x4d\x38\x5f\xb9\xe1\xa1\xa4\x0e\x32\xbe\x05\xab\x89\x92\x38\xc6\xf7\xaa\x5d\x0f\x25\x72\x88\xed\x6d\xb3\xba\xb8\x21\xd2\xe1\x7a\x5d\x61\xa9\x4d\x97\x96\x1e\x46\x17\x25\xb1\x42\x41\x06\xf8\x5c\xdc\x2d\xb4\x42\x6b\x06\xd8\x5c\xdc\x20\xed\xd2\x40\x96\xce\x3d\x76\x4c\xae\x9b\xd5\xcd\x8a\xc5\x7d\xee\x9d\xcf\x9a\xc3\x35\x62\x70\xb1\x1a\x8f\x61\xfe\x36\xf7\x56\xc3\x8d\xde\x21\xf6\xb6\x64\x69\x51\x22\xc7\xb8\x5b\x0a\x78\x37\xc6\xdc\x16\x86\x6c\x94\x50\xf7\xbc\xad\x7b\xd6\xd6\xbd\xa7\x85\x01\x63\x4b\xf7\xb4\x18\xf4\xb2\x28\x79\x59\xf4\x4e\x58\x67\x6b\xfb\x58\x59\x94\xd0\x8e\x9f\x86\x96\x93\x45\x9e\x2c\xba\xe3\x47\xcb\xc8\x22\xd5\x91\x49\xd3\x53\xc3\x11\x1f\xeb\x9e\x8d\x35\xe1\x62\x2d\xf6\xd6\x61\xc7\x0d\xec\x2a\xda\xcf\xc3\x36\xf9\x56\x94\x5c\x2d\x0b\xdb\xc7\xb6\xe2\xa6\xd5\x88\xc7\x46\xa9\x0c\x21\xac\x7f\xaf\x27\x60\x5a\x27\xe0\x59\x27\x67\x59\xa7\xe2\x58\xa7\x62\x58\x27\xe5\x57\x5d\xb0\xab\x78\x2d\x17\x8d\x59\xa5\xf2\xaa\x44\x56\x95\xc8\xa9\x12\x19\x55\x3a\x9f\x4a\x66\x53\xed\xb8\x54\x4b\x26\xd5\x9a\x47\xb5\x66\x51\xad\x39\x54\x6b\x06\xd5\x9a\x3f\x75\xc3\x9e\x3a\xe0\x4e\xc9\xcc\x29\x8d\x37\x75\xc4\x9a\x4e\xc4\x99\x4e\xc3\x98\x1a\xf0\xa5\xe8\xa3\xec\x18\x5b\x5a\xb0\xa2\x28\xa1\xa3\x5c\x69\x8d\x13\x45\x09\xee\x65\x4a\xeb\x4e\x01\x28\x89\x3d\x3c\xa9\x96\x0f\x45\x9e\xe6\x2b\x96\xb4\x9f\x0d\x45\xea\x67\x15\x47\x3a\x31\x17\x3a\xca\x84\x92\x6c\x78\x43\x3c\x68\x87\xef\x44\xde\x46\xda\x2c\xa8\x9e\xed\xc4\x5d\x45\xc7\x38\x50\xf1\x29\xb4\x0b\x8e\x96\x01\xb5\xa1\x17\x07\xf8\xcf\x0e\xcf\x89\x13\xdc\x62\x3f\xb5\x2c\x27\xbd\xaf\
x8e\x7a\x39\x4e\x5c\x2d\x2b\x12\xd6\x2d\xc3\x39\x0d\xbf\x69\xcd\x6e\x5a\x73\x9b\xb6\xcc\x26\x81\xd7\x24\xc3\x8e\x76\x9c\xa6\x0d\xa3\xe9\x46\x13\x6e\xc1\x66\x3e\xb7\x1e\xdc\x92\xcb\x34\x60\x32\xd1\x11\x30\x06\x79\xcc\xba\x42\x1b\xb7\x12\x0f\xb3\x98\x39\x5b\x89\xdb\xed\x47\x38\x4c\xc5\x55\xa2\x15\xcc\x93\x30\x98\x13\xf0\x97\x26\xec\xa5\x3c\x49\xe0\x34\xb7\xce\xb9\xcb\x71\xe6\xb2\xea\x33\x9c\xd6\xae\x8f\xb7\xac\x6b\xf5\x09\x83\x54\x63\x01\x68\x6a\xf5\x2d\x54\xe1\x6d\xce\x32\x6f\x12\xdc\x77\x0f\x33\x96\x35\x66\x12\xed\x42\xa6\xe3\x2b\xad\xad\x15\xc3\x3a\x7b\x9a\x8d\xbd\xc5\x55\xea\xd5\xec\x94\x05\xca\x79\xf8\xc9\x29\x42\x4f\x76\x20\x45\x87\x6c\xe4\x34\x5c\xa4\x43\x26\x12\xaf\xf7\x24\xb1\x90\x54\x0e\x92\xc8\x40\xda\xf1\x8f\x56\xec\x23\x8d\x7b\xa4\x31\x8f\x44\xde\x91\xca\x3a\xd2\x38\x47\x32\xe3\x68\xc5\x37\xda\xb1\x8d\xb6\x5c\xa3\x2d\xd3\x68\xcb\x33\xda\xb2\x8c\xb6\x1c\xa3\x0b\x86\x91\xc8\x2f\x66\xcd\xbb\xc3\x39\x8b\x36\x07\xb6\xe1\x26\x6b\x1c\xf2\xbe\xd4\xba\x27\x2d\xf5\x2f\x36\x10\xd4\xbc\x77\x28\xb0\xaf\x74\x9d\xb9\xe7\x59\xd3\x6d\x7e\x7c\x75\x3f\x44\x41\xb6\x78\xe0\x49\x12\xac\x9f\xe1\xbb\x3f\xd4\xde\x86\xfd\x58\x71\x34\x14\xb5\x15\x87\x1b\x79\xd6\xce\x95\x56\xea\x30\x22\x3f\xdd\x50\xe1\xd3\x38\x16\x49\x6f\xb9\x48\xe9\x16\xf2\x5c\x1b\xf2\x2d\x92\x0d\x88\xcd\xeb\x2a\xf6\xef\x27\x55\x54\x3a\x8f\xb2\x95\xa4\x09\x22\x09\xf7\xa9\xca\xe5\x5e\x8e\xe2\x43\x14\xb5\x63\x24\x36\x48\x2b\x8d\x2d\x53\x7f\xfe\xed\x56\xbc\xe7\xd6\x1c\x50\x4d\xf8\xfd\xa7\x87\x1f\x5e\x26\x3c\xcd\x5e\x3e\xfc\xf0\xb2\x40\x43\x4f\xd4\xa1\xf3\x34\x6f\xe6\xd8\x94\x5f\xcd\xf5\x39\x11\xdc\x5e\xb4\x04\xf4\x3a\x16\x64\xdb\x84\xa7\xdb\x78\xd0\x5a\x62\x62\x1b\x69\x4f\xa4\x52\x6c\x6f\x89\xd6\x20\xca\x7f\x0f\x61\x10\x89\x73\x70\xc2\x1e\x23\xd8\xc6\x49\xf0\xbf\x62\x34\x89\xbb\x61\xae\x9d\x1d\xbe\x1b\xac\xb6\x2c\x19\x88\x63\x84\x54\x5a\xe1\x54\x54\xa3\x0a\x29\x43\x4f\x7e\xac\xc6\xc5\xa0\xad\xcd\xdd\xd3\x57\x71\x38\x9e\x92\x0f\xb1\xd8\xb4\x5d\x32\x85\x78\xd3\x22\xe8\xef\x97\xd2\x0d\x14\x46\x5d\x57\xc9\x34\x63\x19\x57\
x1f\x2f\xb5\x04\x72\xbd\x29\xe7\xc6\x89\x51\xbd\xcb\xb0\x85\x12\xab\x53\xcb\x1f\x83\xbc\x92\x1a\xbe\xf3\xed\xe2\x7c\x71\x8d\x63\x3b\x7f\xba\x9e\xff\x7a\x04\x6f\xce\x3f\xcc\x8f\xc4\x9f\xe7\x97\x47\xf0\xeb\xfc\xfc\x7c\xf1\xf1\x08\x16\xd7\x62\x6b\x95\xfe\x18\x63\x3e\x13\x26\xf7\x85\xe3\x6e\xf5\x46\x4b\x88\xda\x8d\xfe\x48\x54\xde\x44\xd2\x7c\xec\x9c\x71\x9c\x7f\xfa\xe8\xcf\x54\xcb\x8c\xfe\xec\x7a\xe4\x0b\x8d\x39\x6d\xfa\xec\x30\xe5\xb1\xe9\x33\x04\x43\x53\x77\x67\x49\xd9\x04\xf9\x1c\xe1\xb0\x3a\x24\x32\x12\x6a\x35\x55\x46\xc7\x70\x59\x05\xdc\x74\x19\x97\xab\xc5\xa5\x61\xf6\x66\xf1\xb7\xf9\x11\xbc\x99\x9f\x2f\x3e\xba\x99\x18\x14\xd6\xf9\x58\xd5\x63\x7c\x76\xcc\xc7\x46\x74\xc8\xee\x78\x38\xdd\x10\x3c\x17\xe2\x31\x03\x63\xa6\x6a\x54\x8e\x08\xc3\x91\x20\x7b\xd5\xec\x33\xa2\x83\x41\xd8\xe2\x02\xb7\x5e\xc7\x87\x71\xaf\xc4\xd6\x37\xff\xcd\x80\x6c\xe9\x1c\x79\xcb\xac\x4f\xcd\x8f\xae\xfd\x5b\xba\x8d\x0f\x06\xb9\xe1\xee\x78\x61\xc1\x2b\x1c\xc3\x23\x96\x05\x0f\x1c\xd2\x15\x0b\xcb\x37\xa8\x13\x61\xef\x29\x2e\xd8\xf1\x74\x1b\xdc\x67\xa7\x87\x64\x04\x2e\x34\x1a\x1a\x9a\x6b\x51\x43\xbc\xe9\xa1\x8e\xc3\x3a\x2f\x51\xcb\xd0\x98\xee\x43\xf6\x04\x2c\xa7\x7a\x83\x34\x8e\xc6\x8e\x6d\x00\xb3\xce\xaf\x21\x0d\x76\x87\x30\x63\x11\x8f\x0f\x69\xf8\x24\x5a\xfb\x31\x2d\x3c\xec\xef\x93\x78\x07\xd9\xe3\x10\xeb\x90\x06\xbb\x20\x64\xc9\x71\xc8\xa3\x4d\x96\x5f\xce\x94\x6a\x2f\x85\xdf\xf3\x93\xcd\xc9\x11\x3c\x72\xfe\xf9\x58\x5c\x35\x8e\xc5\x9f\xf2\x2e\x48\xff\x30\x54\xcf\xc6\x27\x17\x1a\xc2\x7d\x9c\x06\xa2\x47\x95\xeb\x6f\xa0\xf4\xd1\x71\x14\x1a\x46\x44\xcc\x62\xf5\xc5\xb9\x07\xba\xbc\x63\xc9\xc0\xe9\xf1\x3d\x9c\x9f\x5d\xce\x61\x1f\xc6\x99\xec\xd7\xbe\x9a\x7d\x99\x7d\x09\x46\xcf\xf7\x23\xe7\xd2\xd6\x90\xf8\x45\x88\x1c\xff\xb1\xe1\x06\x35\x2c\xad\x33\xa8\xaa\xc3\x6c\x9b\x57\xfb\x05\xd8\x97\x60\xe0\x7a\x6d\x76\x0c\x36\x58\x63\x8d\x57\x58\xf4\xfa\xda\xf9\x5a\xb5\xbe\xe6\xeb\xc0\xf0\xe7\x81\x5a\x34\xa6\xa9\xfa\x8d\x10\x6d\x56\x00\xd3\xed\x52\xec\xe8\x71\xa4\x7b\x18\x11\x2d\xa1\x3e\xf7\xa4\x91\xe0\x98\xc9\x2b\
x24\x4b\xd4\xbf\x8d\x2c\xbf\x39\x4a\xaa\x39\x9d\xdf\xbc\x9d\x9d\xcf\x9b\x47\x09\x31\xd9\x66\xd7\x63\xa7\xf4\xf3\xc5\x4f\xdf\xbf\x1a\x3e\x69\x8c\x9f\x33\x8e\xbb\xef\x1f\xf9\xbd\xaa\xdc\xd8\x8f\x44\xdd\x7a\x7e\xf3\xe4\x7e\x91\xf8\xd5\xe9\x22\x31\x2c\x0d\xb1\x48\xfc\xea\x17\x89\x7f\xa1\x45\xe2\x57\xbf\x48\xf4\x3c\xcf\xb2\x48\xec\xe2\x94\x05\x2b\xd5\x31\xf5\xea\xf4\xac\x07\xad\xe1\x70\x51\x2b\xad\xfb\x55\x6b\x0c\xf4\xfc\xbc\x33\xda\x57\x71\x24\x61\xfa\x20\x95\x6a\xc9\x68\xc3\xd7\xc0\x52\x60\xb0\x49\x02\xe5\x54\x11\x84\x12\x2c\x95\x8e\x1a\x6c\xb5\x2d\x4a\x34\x3e\xfd\x51\x8e\x33\x88\x57\xab\xc3\x5e\xfa\x07\xc7\x11\x87\x38\x81\x9d\x44\xea\x85\x84\x93\x4e\x9d\x57\x71\x74\x1f\x06\xab\xac\xb1\x34\x1c\xcb\xf7\x76\xaa\x7d\x0c\x49\xfc\xa8\xf9\xdb\x55\x1c\x1e\x76\x51\xe7\x1f\xfa\x96\x1d\xf5\xf3\xce\x5a\x34\x14\x98\x6a\x28\xf8\x54\x57\x23\x27\xa4\xb7\x7e\xd4\x69\x71\x75\x09\x13\x4d\x9b\x57\xa7\xb8\xac\xa8\xf1\x21\x1b\xa0\xb9\xe4\xc8\x06\xd4\x57\x5a\xa7\x4a\xee\x5c\x39\xc2\x0e\x0e\xd8\xa9\x94\x7c\x45\xed\x52\xd1\x5e\xf3\x8c\xd4\xbd\x26\x4a\xde\x1e\xd5\xee\xe0\x9e\x68\x32\xc6\x75\x5f\x39\xb6\xff\x6c\x79\xb0\xd9\x66\xfa\x95\x61\x3c\x58\xd9\x78\x58\xb2\x56\x47\xbc\x97\xaf\xd3\xfe\xb2\xd3\x1f\xaa\x6a\x95\x97\x4b\xc8\x8f\x60\xc7\x59\x7a\x48\xd4\xe5\x56\xce\xce\xf4\xef\x07\x96\xe8\x4d\x60\x6a\x42\x0e\x7d\xda\xc0\xf9\xa3\x55\x6f\xb5\x89\x0c\xff\x72\xac\x67\x06\x64\x74\xbe\x3d\x88\x54\xd3\x06\x71\xc4\xc2\x62\x69\x69\x98\xc2\x8a\x36\xd1\xef\xcc\xe3\x87\x8e\xbb\x90\x45\x9f\xdd\x1e\xd0\xde\x08\x91\xe8\x03\x5a\x4f\x3b\x0d\x0b\x6b\x29\x8d\xe4\xc7\x40\xba\x67\xab\xde\x5b\xeb\xe0\x92\xdb\x7c\x8e\xe1\x8b\x3a\x2c\x0c\xfc\x22\x5d\xc5\x09\x5f\xb1\xa4\xff\xaa\x7d\x0c\x19\xff\x32\x24\x22\x8c\x37\xe9\x15\x8b\x7a\xcf\x66\xe5\xbf\xbb\xed\xa5\xf3\x91\xd7\xa2\x7b\xca\x54\xe0\xb7\xd3\xf2\x77\x83\x63\xcb\xec\xc0\x6e\x12\x0d\x99\x7a\xec\x35\x08\x3d\xdb\x9a\x00\xaa\x36\xca\x26\xbd\xda\xc6\x71\xca\xd3\x3c\xfa\x5d\x18\x6f\x80\x47\x59\x1e\xb5\x6e\xe4\x20\xa9\xfc\xc5\x4e\xe0\x86\x73\xf8\x6d\xb6\x7e\x60\xd1\x8a\xaf\
xe5\x88\x81\xff\x7b\x50\xe1\x8a\xfa\xad\xd1\x61\xbc\xd9\x04\xd1\xe6\xe5\x3a\x5e\xa5\x2f\x1f\x02\xfe\xf8\x92\xe5\x12\x8e\xff\xae\x0a\x0f\x2a\xbb\xc4\xb3\x88\xc2\xa7\x66\x8d\xc5\x07\xed\x98\x74\x78\x28\x83\x43\x4a\xae\xc4\xd4\xb5\x6d\x16\x81\xdc\xad\x8b\xb2\x52\x98\x0c\x97\x13\xd6\xdf\x34\x24\xa6\x00\x3c\x2e\xd9\x6e\xcc\x74\x60\x0a\xfe\xb5\xa3\x48\xd6\xdf\x80\xbb\xaa\x09\x01\x52\x79\x97\x37\x7f\x15\x4c\x5a\x85\x59\x09\xc3\xf1\x48\x6f\x62\xa5\x11\x7b\xf9\x09\xbc\x55\x16\x18\x09\x61\x85\x4f\x55\x74\x69\xd1\xe2\xe9\x61\xbf\x8f\x93\x4c\x05\x1f\x1a\x91\x58\x3a\x5a\xf0\x62\xf3\x92\x9e\x16\xf9\x8d\xa9\xb8\x3a\x6f\xe3\x34\x2b\xde\x31\xdc\x8f\x04\x3e\x04\x4f\x84\x18\x31\x20\x14\x63\x08\xba\x4c\xc2\x25\x33\xb6\x1a\xb7\x54\x1d\x97\xfd\xfd\x1a\xde\x8a\x19\x59\xfc\xef\x8e\x45\x6c\xc3\x93\x97\x57\xaa\x79\x47\x0d\x07\xd2\xdb\xe3\xb5\x1c\x4e\x3d\xbf\x2d\x17\x5e\xb7\xfb\xd1\xcd\xe8\x7a\x8e\xdc\x8f\xc6\x05\xb6\x16\xcf\xf2\xc3\x20\x3d\xec\x76\x2c\x09\xfe\x57\xe2\x19\x2d\x47\xea\x67\x3a\x58\x58\x6d\x5d\xe3\x87\x0a\x28\xc3\x90\x0d\x55\xb3\xe5\x32\x67\xb9\x55\x6e\xd8\x61\xc3\xff\x16\xf0\x47\x93\xb5\xd3\x20\x34\x65\x9b\x93\x2e\xc4\x93\x14\x45\x63\x83\xc8\x4c\x7a\x63\x44\x7d\x14\x4b\x9d\x4a\x3a\x26\x7d\xba\xca\xe1\x95\xc5\xd2\xb0\x04\x4c\x35\xc9\xc8\x9c\x1c\x35\x63\x61\xc2\x44\x86\xf1\x23\x4f\xde\xc4\x87\xc8\x20\x2e\x24\xc2\x50\x8a\x34\x95\x6a\x0e\xa3\x45\xb5\x46\x4b\x76\x95\x94\xa2\x2c\xdc\x89\xc2\x95\x97\x8d\x6c\x58\x93\xa6\x53\x4f\xc7\xf6\x9a\x5b\x03\xa5\xb1\x15\x58\xf8\xc8\x9e\x52\xb8\xe3\xb0\x49\x38\x33\x0b\xfe\x29\x83\x2f\xc5\x49\x45\x0a\x88\x6a\x8d\x55\xe5\xb0\xdf\x7f\x8b\xbd\xf3\xa1\xac\x16\xbe\x77\xe4\x27\x3d\x53\xef\x84\x5c\xba\xf2\x31\x93\xc8\x5c\x98\xae\x49\xf7\x2c\xf9\x2c\x17\xec\xe9\x56\xaf\x9b\xc6\x3b\x26\x59\xc2\x10\xaf\xc0\xae\x63\xb2\x85\xec\xd7\xb1\xf1\x3d\x09\xd4\xdd\xaf\xfc\x92\xe5\xd3\x7e\x78\x38\x9b\x2f\x8d\xbb\x20\x9a\x61\x43\xe7\xa2\x1c\xa4\xdb\x5a\xec\xce\xfb\xec\xd6\xbe\x38\x6a\x50\x89\xa2\x2d\x79\xb4\x32\x09\x0d\x90\xab\x91\xd4\xac\x0a\x76\xfb\x90\x8b\x3a\xf1\x35\
xdc\x3d\xe5\x20\x52\x19\xe8\x62\x17\x44\xc1\xce\x88\x9d\xad\x12\x36\x2a\x07\x88\x22\x53\x86\x74\xc8\xaa\x1f\xaa\x8c\xd3\xd5\xbc\x13\x73\xf6\x0b\x13\xf5\x3b\x82\xe0\xbe\x16\x73\x37\x85\xfd\xe1\x2e\x0c\xd2\x2d\x17\xcd\xb0\xe2\xc0\x1f\x86\x8e\x2b\xd5\xf3\xfd\x2b\xf1\x45\x87\x8c\xa7\x10\x64\xf0\x28\xd7\x93\x28\x16\x57\xcd\xcf\xa2\x7e\x51\xca\x2b\x4f\x7b\x66\xb0\xfc\xe5\x9f\xad\xaa\xc5\x32\xa9\xfb\x57\x2f\x28\x43\x0f\xa7\x79\x74\xde\x02\x93\x37\x59\xab\xf6\xb9\x02\x90\x45\x6b\xe0\x5f\x82\x34\x4b\xd5\xbd\x4c\x5a\x27\xb6\xc1\x78\x92\xd5\xe6\x84\x99\x76\x5c\xdf\x98\x4f\x4e\x70\xb6\x9a\x35\x5f\x6a\x84\x77\x37\xed\x7a\xcd\xfc\x50\xa2\x2e\x62\xb3\x91\xed\xa6\xe6\x85\x51\x8c\x53\xb9\x1c\x06\x79\x9c\xdb\x55\x51\x39\x9d\x8d\xef\x6a\x76\xfd\xf3\xa7\xb7\xef\x67\xd7\xcb\x4f\xcb\x5f\xaf\xe6\xd8\x6c\x2c\xaa\xfc\xf9\xd9\xe5\xfc\x28\xff\xf3\x9b\xd9\xf5\x78\x58\x33\x33\xa8\xef\x78\xb0\x7a\xc6\xc5\x45\xed\x8c\x7f\xfc\x66\xd0\x82\x68\x02\x14\x00\x5d\xef\x62\xe4\xec\xae\xb3\x04\x57\xf5\xaa\x5c\xd1\xca\x98\x45\x59\xe1\x02\x3e\xd2\x08\xf9\xa1\xa6\xda\x57\x37\xc1\x43\x37\x76\xc8\x8b\x17\xa5\x5f\xac\x1c\x44\x63\x53\xfe\x9d\x0a\x2f\xcd\x56\x99\x38\xe5\xa8\xe3\xd3\x97\xa3\xd6\x9b\x82\x3c\xdc\xc0\x9a\x45\xe3\x89\x68\x94\x3b\x7b\x70\x0f\x5f\x44\xb9\xf2\xb0\xd5\x38\x4a\x15\xa2\xaa\x96\x19\xd3\xa3\x6c\x59\x06\x59\x12\x6c\x36\x3c\x11\x87\xb8\x30\x7e\x94\x11\xa8\xf3\x93\x76\xff\x1b\x46\xe5\x16\xec\x47\xf3\x0d\xec\x2e\x7e\xe0\x27\x70\xa3\x1c\xf5\xc2\xa7\xa3\xf2\x83\xe4\xbf\xbc\x94\x35\x18\x91\xcd\xe0\x91\x25\x91\xda\x12\x07\xde\xf2\x32\xff\x9a\x6c\xcb\xc7\x4e\xa4\xfa\x5e\x29\xde\xa2\x1a\xfe\x18\x0e\x91\x6c\xf4\x2f\x32\x0e\xe7\x88\xc4\xfd\x21\x93\x7b\x5a\xad\x7b\x95\x9c\x13\xf8\xfd\x69\xde\x45\xc9\x61\xb7\x4f\x8b\xb7\x9c\xfc\x01\x60\x36\xb6\x1b\xb1\xa8\xda\x81\x65\xf8\xf9\x35\x4f\xc4\xf1\xaf\xac\xba\xb4\x8d\x4b\x15\x6d\x1c\x86\xf1\xe3\xf8\xf6\x71\x1f\x1f\x92\xfa\xf4\x86\x7f\x94\x7e\xbc\xf0\xd7\x57\x47\x32\x72\x6e\xc6\x37\x71\xf2\xf4\x1a\x5e\xbc\x38\x9d\x5d\xfe\x34\xbf\x7e\xf1\x42\xfc\x7d\xde\xd8\x63\xcb\
xd9\x8b\x17\xd2\x49\x5a\x94\xf9\xe7\x51\x43\xfc\x5f\x3a\xe2\x3f\xce\xae\x2f\xcf\x2e\x7f\xa2\xcb\xaf\x8b\xff\xde\x4d\xed\x95\x8f\x79\x47\xfa\x1f\x1d\x55\xbe\x12\x2f\xef\x59\xd1\xeb\xc2\xdb\x55\x3f\xbf\x7b\x1d\xbd\x8a\x47\x9d\xa1\x64\x6c\xa2\xe6\x98\x16\xe3\x50\x35\x80\x1a\x87\x47\xc5\x8b\xcc\xee\xd4\xb2\x26\xdf\xbf\x82\xbb\x43\xd6\x53\xb5\x3f\xbe\x02\x06\x79\x23\xe4\xaf\x18\x91\x99\x57\x40\x1c\x73\x64\xc0\xb1\x3b\x9e\x3d\x72\x1e\x49\x49\xd1\x1a\xfe\x22\xfe\x03\x8b\x9f\x9b\xf5\x1d\x91\xd9\xbb\x6e\xfd\xa5\x5d\xf7\xbf\xd6\xea\x6b\xb2\xf2\x2a\x47\xdf\x66\xa3\x75\x5f\x23\xa5\xaa\x76\x36\x11\x7a\x32\x7c\x66\xf8\xd6\xf4\xed\x46\x09\x6b\xa8\x27\x4a\xc3\x53\x00\x2e\x2b\x8b\x11\xfa\x06\xd6\xac\xad\x19\x4a\xe6\xa2\x6d\x4c\x71\x38\xa0\x20\x71\x06\x32\x41\x7d\xab\x63\xce\x47\xca\xed\xa0\x73\x03\x68\x9c\x91\x44\x04\x3e\x07\x88\x90\x1b\x78\x8c\x0e\x4c\x51\x3a\x30\xc5\xe9\xc0\x18\xa9\x03\x73\xac\x0e\xcc\xd1\x3a\x30\xc2\xeb\x00\x9b\x0a\xd5\x6e\x16\x62\x52\x9f\xda\xcf\x44\x6c\x12\xd3\x89\xd0\x3b\xc0\xe3\x77\x46\x32\x87\x33\x96\xa2\x10\x3c\x40\x4d\x2e\x6a\xda\x51\x33\x1c\x0f\x0c\x91\x3c\x30\xc5\xf2\xc0\x7a\xd8\x9a\xe0\x79\xe0\x08\xd1\x03\x53\x4c\x0f\xb0\x36\x0e\xb4\x95\x83\x84\xec\x01\x09\xdb\x33\x1a\xf0\xb9\x3d\xc3\x05\xba\x07\xdd\x78\x27\xd3\x98\x2d\x96\x86\x16\x62\xb0\x59\xf1\x10\xef\xe8\xc9\xf9\x9e\xc7\xb5\x15\x63\x55\xaa\x9b\x5b\x86\xfd\x91\xee\x91\xda\x5c\x45\xff\x6d\x39\xdc\x64\x6c\xf5\x79\x9d\x04\x0f\x3c\x29\xe8\x3d\x98\x5d\x9d\xb9\xb2\xd0\x66\xe8\x74\xec\xa8\x5c\x8e\xbd\xfd\x67\x98\xe7\xdc\x51\x27\xa2\xdf\xdb\xe8\x58\x55\x06\xf6\x2c\x61\x3b\x9e\xf1\x24\x25\xc6\xc3\x31\xb3\x37\x81\x5c\xb1\xef\xcd\xea\x89\xcb\xd8\xc8\xb0\x99\x3b\xd1\x79\x3b\xdb\xb9\x19\x91\x59\x31\x27\xea\x6e\x6c\x35\x1a\xbd\x5f\x47\x6e\x54\x76\xcd\xc7\x8e\x97\x8e\x91\x54\x68\x66\x9f\xf9\x20\x0d\x9c\xa5\x69\xa6\x96\x9d\x6c\x77\x08\xb3\x60\x6f\x9c\x97\xa6\x91\xea\x3f\x4e\xaa\xd8\xdf\xf0\x10\xf0\xc7\xb4\x58\xc2\x87\xbd\x89\xea\x0f\x3e\x0d\x28\x39\x09\x28\x29\x34\xb0\xa3\x04\xa0\x53\xa5\xff\x9c\x22\xf9\xe7\x57\x4a\
xfd\x39\x45\xe2\x4f\xa3\xb4\x9f\xb0\x32\x72\xab\xa8\xc9\x1c\x49\xfa\x89\x4d\x78\xe5\x3e\xe5\xe7\x54\x09\x3f\x8d\xd2\x7d\x92\x62\x72\x0e\x24\xfb\x6c\x26\xf0\x44\xd5\x56\x9f\xea\x53\x13\x51\x95\xdc\x06\xc8\x48\xf0\x36\x69\x3e\x5d\xac\x5f\x16\x29\x3e\xa7\xdf\x2f\x2d\x53\x7a\x9a\x26\xf4\x84\x3b\x4c\x5c\xdf\xf1\x74\x9e\x79\xe0\x6c\xcc\x10\x1a\x4e\xe6\x59\x25\xe8\x44\xc8\x1c\x4c\xe5\xd9\x4c\xcf\x89\x90\xda\x9f\xc8\xb3\x99\x9c\x13\xb5\xce\x6b\xd3\x78\xf6\xa4\xe6\x44\x08\xee\x66\x64\xee\x4f\xcc\x89\x11\xab\x0b\xe0\xdd\x0d\xca\x8d\x5c\x97\x34\x21\xbc\xf5\x61\xb9\x11\x72\xcb\xf4\x9d\x23\x29\x39\x11\x22\xab\xf3\xdd\x60\x42\x4e\xd4\xc7\xb3\xfe\x74\xae\x4b\x52\x6a\xcb\x32\x71\xe7\x44\xc9\x38\x8d\x52\x71\xd6\xe2\x7e\xe3\xb6\x3d\x83\x84\x53\xb8\x2d\x7a\x28\x0d\x67\x33\xb5\x26\xe6\x44\x31\x94\x84\x53\x9f\x58\x13\x21\xbd\x27\x05\xa7\x2e\xe8\x37\xa6\xdf\x74\x09\x38\xb5\x49\x35\x31\x93\xac\x27\xfd\x66\x5f\x4a\x4d\xbc\xe8\x3c\xf9\xe6\x68\x42\x4d\xbc\xe4\x7a\xea\x4d\x7d\x3a\x4d\xbc\xcc\x66\xe2\x4d\x7d\x32\x4d\x2b\xa9\x3f\xbe\xd2\x49\x7d\x45\x91\xaa\x49\xba\xe9\x20\x91\x26\x2e\x9c\x3c\x2d\x89\x26\x2d\x85\x26\x29\x81\x26\x29\x7d\x26\x29\x79\x26\x35\x75\x26\x31\x71\xa6\x4d\xda\x4c\xab\xa4\x99\x96\x29\x33\x2d\x13\x66\x5a\xa6\xcb\xb4\x4c\x96\x69\x99\x2a\xd3\x45\xa2\x4c\xeb\x34\x99\xc4\x24\x99\x94\x14\x99\x4e\x12\x64\x4e\x92\x1e\x73\x8a\xe4\x98\xee\x53\x63\x4e\x90\x18\x73\xa2\xb4\x98\x46\x49\x31\x65\xce\x3e\xe4\xcd\xc2\x6d\x4a\x4c\xb3\x84\x98\xa8\x3b\xc5\x33\xa4\xc3\x9c\x22\x19\xe6\x54\xa9\x30\x27\x48\x84\x69\x94\x06\x93\x94\x5d\xd2\x20\x09\x66\x95\xd8\x12\x21\x77\x3c\x05\x66\x91\xd6\x12\x39\x1b\x7a\x13\x60\xb6\x92\x5a\x62\x6f\x58\x9a\xf4\x97\xed\x94\x96\x28\x35\x7e\x7f\xf2\x4b\x27\x59\xa4\x5a\xa9\x2f\x6d\xd3\x59\x5a\x26\xb3\xb4\x4b\x65\x89\x4e\x64\x69\x9b\x13\x92\x50\x9e\x9e\xc2\xd2\x85\x96\x97\x9c\xbe\x72\x7a\x1d\xaf\x55\xba\x4a\xc3\x64\x95\x66\x40\x53\xf1\x98\xa5\xaa\xc4\x6b\xa5\x46\x12\x55\x76\x93\x4f\xa2\xec\x3b\xdd\x34\x95\xba\xd4\x93\xb8\x95\x5d\x9b\xa4\x52\x9b\x78\x12\x21\xb7\x93\xa2\
x52\x9f\x76\x12\xab\xee\x6b\x26\xa8\xec\x4b\x3a\x89\x90\xda\x48\x4f\x39\x92\x72\x92\xda\x53\xfd\x09\x27\x71\xe6\xd9\x7e\xbd\x76\x4b\x5b\x8d\xd1\xf4\xf7\xe8\xb5\xdb\x9a\x6a\xe4\xfe\x6b\x90\x6a\x12\xa5\x8b\x67\xd1\x86\x3b\x4f\x34\x69\x94\x66\x92\x62\xd5\x75\x9d\x64\x72\x9a\x14\x93\xee\x13\x4c\x4e\x97\x5e\x72\x8a\xe4\x92\xce\x52\x4b\x62\x35\x81\x84\xb4\x92\xb4\xa4\x92\xa4\x94\x92\x36\x09\x25\x2d\xd2\x49\x52\x92\x49\x52\x52\x49\x92\x12\x49\xd2\xd2\x48\x52\x92\x48\x12\x53\x48\x5a\x24\x90\xb4\x49\x1f\x69\x97\x3c\xd2\x2e\x75\xa4\x5d\xe2\x48\xbb\xb4\x91\x76\x49\x23\xed\x53\x46\x12\x13\x46\x9a\x05\x77\x2c\x1e\xf4\x95\x04\x1f\xec\xb1\x78\x7a\x9c\x9c\xc5\xf9\xe6\xb7\xca\xc9\x2c\xaf\xbd\x79\xde\xc0\x87\x1f\x5e\xaa\x22\x86\x89\x01\x73\x78\x3b\x58\xf3\x28\x93\x2e\x79\x35\xc7\xf4\x42\x4f\x53\x46\x21\x34\x3d\xb8\x8b\xad\xab\x0c\x37\x98\xc5\xe6\xd1\x41\xf6\xc1\xea\x73\xfb\x4a\xf5\x3c\x3e\xb5\x57\x9a\x37\xe3\xe4\xb8\xbd\x48\x92\xeb\xd3\x1c\x57\x2c\xfa\x2c\x46\xd1\x1d\x93\xbe\x3d\x75\x97\x56\x29\x71\x2a\xcf\x55\x14\xae\xa5\x1e\x17\xda\x00\x0c\xba\xd5\x12\x30\x7d\xef\x61\x49\x2f\xf5\x34\x5d\xbd\xde\xcb\x2b\x3f\x14\x51\x9c\x92\xbc\x7b\xb3\x18\x52\x6e\x10\x1b\xb4\xfe\x74\x14\xf0\x7b\x26\xa1\xe6\x24\x3e\x6c\xea\x01\x5a\x31\x8a\xe3\x8e\xe7\x81\x1e\xfb\xc2\xd8\x4d\x16\x57\x47\xf0\x66\xb1\x5c\x2e\x2e\xa6\x33\x53\x53\x59\xb1\xa2\xfc\x72\x71\x85\xf8\xb5\xfa\x1a\xc3\x02\xd1\x61\x57\x8d\x23\xec\x64\x1a\x8a\xf6\xae\x7b\xc6\x23\xc0\xeb\x9e\xd6\x1c\xbc\xac\xd7\x98\x36\xcc\xc5\x28\xdf\xb1\xe8\xa9\x39\x46\x63\xc5\x23\xe2\xf4\x07\x96\x83\x3a\x9f\x60\x17\x3c\xdb\x3e\xb7\xff\xfd\x75\xfd\xd5\xdf\xe2\x7a\xd6\xa8\xa0\xc5\x9a\x76\x9b\x37\xf2\xa7\x9d\x94\x74\x5b\x33\x69\xe1\xfa\x5a\xaa\xc7\x9a\x4e\xab\x4a\x37\xa4\x82\x20\x4b\x63\x70\xbc\x3e\xac\xb0\x66\x46\x65\x61\x54\x11\xb8\x65\xe4\xe3\x3b\x5e\xf7\x95\xdd\xb3\x04\x2b\xb1\x35\xae\x73\xdf\xf5\xba\x4e\x06\x05\xd9\xaa\xa7\x58\x73\x2f\xe6\xcb\xf7\x8b\xd3\x26\x67\x9b\xff\x9d\x74\xe5\x42\x88\x2c\x8a\xcd\x7e\xa9\x44\x9c\x5d\x96\x7f\xbe\xf9\x70\x51\xfe\x59\xdc\x99\
x6f\x96\xd3\xad\xd1\xdd\xaf\xc2\x17\x46\xde\x7f\xab\xaf\x27\x14\x42\x5d\xe9\xab\xf6\xc4\x17\x52\x0d\x6f\x50\x4e\xc1\x16\x2c\x79\x9a\x3d\x37\xaf\x76\xa3\x79\xf3\xd7\x3c\x5b\x93\xeb\xd3\xb4\xca\xec\xf7\xe1\x13\xb0\xbc\x5d\x1b\x6e\x12\xec\xde\xf4\xb4\x0e\x70\x5b\x2b\x58\x5f\xfb\x3c\x54\x56\x7b\x3c\x54\x36\xfa\x78\xa8\xcc\x43\x65\x1e\x2a\x1b\x7a\xf1\xbf\x25\x54\xa6\xdb\xcb\x3c\x5d\xe6\xe9\x32\x4f\x97\x79\xba\xcc\xd3\x65\x9e\x2e\xf3\x74\x99\xa7\xcb\x3c\x5d\xd6\x2d\xe5\xe9\x32\x4f\x97\x79\xba\xcc\xd3\x65\x43\x8f\xa7\xcb\x3c\x5d\xe6\xe9\x32\x4f\x97\x79\xba\xcc\xd3\x65\xf9\xe3\xe9\x32\x4f\x97\x61\xe4\x4c\xaf\xec\xf5\x98\x99\xc7\xcc\x3c\x66\xe6\x31\x33\x8f\x99\x79\xcc\xcc\x63\x66\x1e\x33\xeb\x2f\xea\x31\x33\x8f\x99\x91\x4b\x7b\xcc\x6c\xe0\x69\xa7\x7b\xb8\x16\x47\x92\xe7\xcd\xf9\x20\x5f\x69\x2e\xc4\xed\xbd\xc4\xec\xe5\x8d\xcb\xc6\x95\x26\xed\x03\x03\x79\x96\x2b\x53\xf8\x65\x8f\x26\xd7\x8e\x2e\xd5\x64\xa0\x8e\xc2\xf9\x4d\xae\x79\x14\xef\x82\x88\x65\x66\x59\xe0\xec\x3d\x66\x4f\xab\x17\xe2\x8a\x4f\xd0\xaf\xd8\xba\x74\xb4\xe6\xb5\xd6\x2b\x0e\x86\xb2\x9f\xcd\x8e\x01\xe6\xe9\x3d\x00\x93\xe2\x03\x68\xce\xb3\x58\xd7\x69\xa0\x0c\x06\xb0\x4f\xf9\x01\xcf\x37\x28\x28\x55\xc3\xa4\x02\xc1\x5e\x64\xcd\xd2\x81\xa0\xce\xde\x2a\x71\xc8\x58\x4a\x10\xfc\xfd\xc8\x3c\x79\x08\x90\x86\x2b\xd8\xf8\x7b\x03\x5d\xe3\xe7\xcc\xef\x1b\x26\xf4\xfd\x06\x43\xff\x6f\xa4\xc8\x32\x85\xf8\x88\x0f\x38\x52\xec\x04\x1e\xe3\x60\xe8\x35\x8e\x14\x59\x20\x52\xe3\x9e\xe3\x48\xc1\x5d\xbf\x44\x9d\xf7\x38\x52\x68\x71\xcf\x75\xec\x41\x0e\x13\x7a\x91\x83\xa9\x27\x39\x52\x66\xb5\x6a\x0e\x78\x93\x63\x87\x6d\xdd\xf7\xbc\xd7\xa3\x1c\x3b\x68\xbb\xea\x15\x4b\x3b\x8c\x9d\x67\x39\x38\x5c\x29\x2d\x3c\xcc\xe1\xab\xec\xf9\x96\xce\xe6\x60\xea\x70\x8e\x9d\x20\x71\x0b\xd3\xd4\x3a\x9d\xa3\xb7\x8b\xae\x01\xa3\xe3\x78\x8e\x94\x59\x77\x53\xef\x71\x3e\x27\x6d\x15\x4d\x57\x75\x8d\x03\x3a\x69\x35\x6b\xb9\xab\xf7\x39\xa1\xa3\x65\xeb\x5c\xd6\x5b\x0b\x3e\x7e\x00\x8c\x6b\xf5\x91\x32\x47\x5c\xd7\x91\x3e\x36\xe2\x99\xc2\x7d\x1d\x8c\x5d\xd8\xd1\xc3\xdf\
xbd\x1b\x3b\x4c\xe3\xca\x0e\xcf\xe0\xce\x0e\xa6\x2e\xed\xe8\x7d\xd8\xa9\x31\x02\x26\x31\x48\x80\xce\x28\x71\x34\xec\xde\x8e\x14\xaf\x75\x86\xef\x73\x71\xc7\xae\xba\x23\x16\x0a\x0a\x80\x0f\x35\x08\xbf\xee\x14\xdf\xeb\xea\x8e\x9d\xd3\x7a\xc7\xf8\x96\xbb\x3b\x4d\x68\xaf\x73\xbc\xce\xe5\x9d\xf6\x8a\xa6\x83\xbc\xde\xed\x9d\x26\xb9\xe9\x24\xaf\x77\x7d\xb7\x96\xfc\xea\xc7\xae\xab\x7f\xe5\xfe\x4e\xed\x4a\xb2\xb3\x3c\xa0\xad\x63\x40\x76\x9a\x07\xb2\xe3\x3c\x50\x9d\xe7\x81\xea\x40\x0f\x54\x27\x7a\xb0\x70\xa4\x07\xba\x33\x7d\xb7\x28\xce\xd2\xa3\x29\x8f\x33\x34\x81\xbd\x63\x3d\xd8\x3b\xd7\xf7\x8a\x30\xb7\xfd\xf4\x89\x40\x18\x9f\xfa\x44\x20\x2c\x50\xe0\xc8\xd9\x1e\x5c\x38\xdc\x03\xdd\xe9\x1e\x88\x8e\xf7\xe0\xca\xf9\x1e\xa6\x72\xc0\x87\x89\x9c\xf0\xc1\xcc\x11\x1f\x7d\x36\xee\xfa\xc7\xb6\x9d\xf1\xb1\xf7\x43\xe5\xba\x3f\xee\x90\x8f\x3d\x53\x95\xee\xfb\xfd\x4e\xf9\xb4\xab\x6c\xe9\xc2\xaf\x71\xcc\x47\x4a\xd4\xb9\xf1\xf7\x39\xe7\x63\xbb\xaa\xe5\xca\xaf\x77\xd0\x27\x75\xd6\x90\x3b\x3f\x5e\x29\xe7\xdc\xa5\x1f\xcc\xdc\xfa\xb1\xf7\xb7\xf2\xb2\x39\xe8\xda\x8f\xbd\x0a\x95\x20\xc0\xb8\x7b\x3f\x52\x74\xd3\xcf\xbe\xcf\xc5\x1f\xdb\x59\x1a\x20\xa0\x2b\x18\x7f\x1f\xd4\x42\x01\xed\xe5\x06\x29\x76\x08\x0c\x28\xdd\xfd\x2d\x66\x95\x06\x0e\xa0\xf8\xbb\x8a\x67\x2a\x40\x00\xdc\x43\x02\x60\x0f\x0a\x80\x35\x2c\x00\x14\x60\x00\x6c\x14\xe3\x76\xe0\x00\x58\xc1\x03\x56\x15\x77\x05\x11\xc0\x57\xd1\xe7\x5b\xf1\x04\x60\xce\x14\x10\xf4\xf9\x63\x5c\x81\xcd\x7e\xdd\xc3\x16\x20\x45\xf6\x91\x08\x2d\xbe\x00\x6d\xf8\xec\xa1\x11\x7a\x19\x03\x92\xb1\xa4\x49\x24\x74\x39\x03\xb4\x1a\x53\x43\x25\xb4\x58\x03\xca\xe1\xad\x87\x4c\x68\xf2\x06\xd8\x4d\x76\x90\x4e\x68\xf6\x1e\xf5\x54\xdc\x26\x14\xea\x16\x0a\xd2\xb1\xb8\x9f\x52\x00\xbc\xb9\xcc\x31\xa9\x00\x66\xb4\x02\xc1\x03\x60\x84\x58\x70\x60\x7f\xe9\xda\x1f\x68\xb7\x96\xca\xfe\xa0\x21\x17\xf0\x33\xd4\x71\xc4\x1c\x98\x24\x6a\x0e\xe0\x22\xe7\x60\xa7\xe9\x10\xf3\xd0\x43\x32\xd8\xda\x34\x6c\x83\xe7\x40\x2b\x80\xce\xe4\x1a\x5e\x02\x03\x01\x64\x0e\x02\xa8\x2c\x04\x58\xf2\x10\x60\xc7\
x44\x00\x91\x8b\x00\x22\x1b\x01\x54\x3e\x02\xc8\x8c\x04\x10\x39\x09\xa0\xb3\x12\x9d\xca\x52\xf4\xc8\x74\x66\x02\xac\xb9\x09\xb0\x66\x27\xfa\x24\x60\x35\xc8\x36\x0c\x45\x8f\x04\xb4\x16\xdb\x96\xa5\xd0\xd4\xc3\x3c\x6d\x0f\x20\x53\xf7\x80\x23\xe6\x1b\x91\x6a\x05\xbe\xa5\x34\x3e\x30\x4d\x2a\x1f\xb0\x49\xe7\x13\x1d\x76\xe2\xf0\xfb\x5c\xd4\xc4\x65\xf1\x3a\x5c\xe1\x09\xae\xd6\xb8\x9a\x74\xcc\x1c\x65\xbb\x79\x5e\xc2\xb4\xd0\x37\xce\x4b\x94\x03\xc2\xd3\x12\x9e\x96\xe8\x3c\x9e\x96\xf0\xb4\x84\xa7\x25\x3c\x2d\xe1\x69\x89\xb1\xe7\x5f\x87\x96\xd0\xed\xf8\x9e\x95\xf0\xac\x84\x67\x25\x3c\x2b\xe1\x59\x09\xcf\x4a\x78\x56\xc2\xb3\x12\xbd\xaf\xf0\xac\x44\x47\xbc\x67\x25\xcc\x0a\x7a\x56\x02\x53\xde\xb3\x12\x9e\x95\xa8\x1e\xcf\x4a\x68\x1e\xcf\x4a\x78\x56\xc2\xb3\x12\x9e\x95\xf0\xac\x84\x67\x25\x3c\x2b\x61\xfc\x5d\x9e\x95\x40\x55\xfb\x1b\x67\x25\x74\xda\x7c\x4f\x4a\x78\x52\xc2\x93\x12\x9e\x94\xf0\xa4\x84\x27\x25\x3c\x29\xe1\x49\x09\xf3\x92\x9e\x94\x30\x2c\xe7\x49\x09\xd3\xe2\x9e\x94\xf0\xa4\x84\x27\x25\x8c\x9f\x6f\x8d\x94\xd8\x07\xab\xcf\xed\x8b\xe7\xf3\x40\x13\x57\x9a\x37\xe3\xe4\x4c\x70\xdd\x26\x57\xaa\x39\xc8\x58\xf4\x59\x0c\xa9\x3b\x26\x5d\xdb\x3a\x79\x42\xcc\x4e\x18\x78\x47\xf2\x75\x90\xf0\x15\x85\x79\xb0\x9b\x8f\xa7\xc5\x6b\xf1\x02\x9e\xa9\x0b\xcb\x1a\xd2\x93\x54\xbe\x78\x2f\xd3\x51\xc2\x21\xe5\x39\xfe\xa0\xfa\x38\x8b\x21\xe5\x21\x86\x18\x81\x6e\x96\xe8\x3d\x4b\xc5\x9f\x92\xf8\xb0\xd9\x4a\xe9\x98\x71\xa2\x9e\xc2\x17\xa6\xf4\x6e\x39\x3d\xbb\x9e\xab\xfd\xf5\xc3\xe5\xcd\xd5\xfc\xed\xd9\xbb\xb3\xf9\x29\x66\x4d\x59\x2e\xae\x8e\xe0\xcd\x62\xb9\x5c\x5c\x98\xbb\x3c\x60\x13\xa7\x69\x6b\x89\x28\xbf\x5c\x5c\x21\x7e\xad\xbe\xc6\xb0\x40\x74\xd8\x55\xe3\x08\x3b\xa3\x82\x28\xe3\x1b\xc4\x26\x25\xee\x8d\x2c\x93\xe5\xfe\xfc\x27\xea\x44\xbc\xac\xd7\x98\x36\xcc\xc5\x28\xdf\xb1\xe8\xa9\x39\x46\xa5\x1a\x04\xa5\x10\x14\xb7\x61\xbb\x41\x9d\x4f\xb0\x0b\x9e\x6d\x31\x3c\x8c\x8b\x05\xed\xba\xfe\xea\x6f\x76\x51\x6b\xd4\xd2\x62\x61\xbb\xcd\x5b\xfa\xd3\x4e\x4a\xba\xad\x25\x5f\xc7\x75\xb8\xd4\xfd\x35\x55\xb6\x4a\
xd9\xc5\xa3\x2c\x7c\x52\x9e\x00\xf1\xfa\xb0\xc2\x26\xc4\x57\x66\xe4\xc7\x6d\xb0\xda\x96\xd8\x4a\xcd\x69\x7c\x8f\xb3\xd7\xb5\x8d\xf5\x59\x5c\xa0\x1f\x75\xa5\x12\xc1\xb5\xb0\x58\x78\x2f\xe6\xcb\xf7\x8b\xd3\xc6\xaa\x5b\xfc\x9d\xf4\x2e\x44\x88\x2c\x8a\xcd\x7e\xa9\x44\x9c\x5d\x96\x7f\x96\x9e\x85\xf9\x9f\xc5\xd5\xfc\x66\x39\xdd\x42\xdd\xfd\x2a\x7c\x61\x64\x82\xc6\xea\xeb\x09\x85\x50\x39\x27\xab\xf6\xc4\x17\x52\x0d\x6f\x50\x2e\xd5\x24\xd1\x7e\x9e\xa3\xb6\x2e\x7d\xf7\x57\x3f\x6a\x93\x2b\xd5\xb4\x5e\xed\xf7\xe1\x13\xb0\xbc\x71\xeb\x8e\x31\xc0\xee\xcd\x6f\x89\x25\xeb\x2c\x16\x3f\xb1\xa6\x1c\x32\x53\x45\x20\x01\x4e\xa6\xb2\x9e\x2e\xf6\x36\x2a\xe3\x39\x15\xdf\x69\xc2\x76\xca\xd5\x1e\xb7\x17\x8d\x71\x9d\x53\x90\x9a\x26\x94\xa6\xdc\xc0\x10\x32\x8d\x08\x4d\x64\xee\xed\x71\x3a\x93\x92\x21\xdb\x2d\x99\x39\x15\x95\x69\x44\x64\x92\x4c\x01\x03\x34\x66\x93\xaf\x44\x9e\x55\x74\x24\xa6\x86\x15\x21\xb7\x01\xd2\xd5\xc5\x86\xc0\x74\xb1\x7e\x59\x90\x97\xcf\xbd\x9b\x59\x52\x97\x46\xc4\xa5\x72\x0c\x40\x74\xfe\x18\x6d\x49\x71\xd5\x18\x22\x2d\x1b\xec\x24\xc6\x2f\x48\x4b\x59\x6a\xb9\x49\x84\xd4\x2e\x61\xa9\x67\x26\x51\xab\xbe\x96\xae\xec\xe1\x25\x11\x82\xbb\x20\x7d\x3f\x2b\x89\x11\xdb\xf6\x43\xd0\x7b\x16\x20\x57\xa9\x29\x18\x49\x33\x3e\x12\x30\xa3\xca\x3d\x1b\x39\x01\x17\x39\x35\x13\x69\xc4\x43\xd2\x5c\x16\xdc\xbb\x2b\x20\x5c\x15\xb0\x06\x93\x5e\x06\x52\x4f\x35\x22\xa4\xf7\xf0\x8f\x96\x7e\x0a\x5a\xf6\x51\x4b\x33\x62\x26\x59\x0f\xf7\xd8\x47\x32\xe2\x45\xe7\xcc\xe3\x28\xc5\x88\x97\x5c\xe7\x1d\xf5\x04\x23\x5e\x66\x93\x75\xd4\xd3\x8b\x56\x52\x7f\x7c\xa5\x93\xfa\x8a\x22\x55\xc3\x38\x3a\xa0\x16\xb1\x9a\x21\x0a\xad\x48\x23\x15\x49\x94\x22\x89\x50\x24\xd1\x89\x54\x32\x91\x48\x25\xda\x10\x89\x56\x34\xa2\x25\x89\x68\x49\x21\x5a\x12\x88\x96\xf4\xa1\x25\x79\xe8\x82\x3a\xb4\x26\x0e\x89\xb4\x21\x85\x34\x74\x42\x19\x4e\x42\x18\x4e\x41\x17\x1a\x90\x85\xc8\xcb\xc5\x18\x55\x58\x70\x82\x98\x8b\xd5\x18\x51\x58\x63\x04\x11\x62\x7b\x69\xc2\x0e\x1f\x88\xbc\x59\x34\x49\x42\x2d\x1b\x88\x90\xd8\x43\x11\x76\xb8\x40\xcc\x61\
x4f\x47\x10\x1a\x30\x81\xb8\x93\x9f\x73\x1e\xd0\x80\x05\xac\xae\x4b\x08\xb9\x83\x1c\x60\x8d\xec\xc3\x5c\x59\xc7\x18\xc0\x26\x28\x87\xbb\x0a\x69\xf9\x3f\x2d\xd1\x87\x90\xdb\x15\xda\x47\xf3\x59\x58\x2e\x87\x48\x3e\xec\x0d\x4b\x87\x30\xb7\x28\x3e\x94\x52\x3f\x67\x49\xa6\x22\xf8\x1c\xd3\x7b\x96\xe4\x9e\x1d\xb5\x87\x26\xf6\x88\xd0\x9b\x0d\xa9\x47\xa7\xf4\x5c\xe8\x7c\xc9\x74\xde\x73\x6b\x7c\xad\xc8\x3c\x43\x2a\x0f\x50\x33\x71\x94\xc8\x23\x92\xc4\x7d\x34\x5e\x2f\x5f\x87\xb2\xfd\xd4\x49\xbc\x7e\xb6\x0e\xb7\xce\x6b\x29\x3c\x2d\x57\x87\x90\xdb\x21\xf0\xf4\x4c\x1d\x56\xf9\x57\xd2\x77\x83\x3c\x1d\x42\x6a\x83\xbc\x1b\x61\xe9\xa8\x3d\xd5\xcf\xd1\xe1\x4c\xb7\xe3\x11\x01\x73\xdd\x35\x46\xef\xdf\x17\x0d\x90\xce\xcf\x19\xb0\x73\x48\xbb\xec\x08\x37\x47\xd5\x50\xf7\x32\x73\x1d\x0a\x0e\x69\xf1\x6d\xf1\x72\x7a\x75\x32\x42\x66\x47\xf1\xec\xea\xa8\xe0\x38\xac\x5e\x47\xf1\x3c\x4c\xbd\x21\xb5\xd2\x7d\xe1\xf4\xa8\x91\xf4\xc6\xa2\xe8\x4d\xa6\x17\x24\x50\x6e\x34\xc2\x8d\x44\xb7\xd9\x90\x6d\x16\x54\x1b\x85\x68\xa3\xd0\x6c\x24\x92\x8d\x46\xb1\x51\x08\x36\x22\xbd\x66\x41\xae\xd9\x50\x6b\x76\xc4\x9a\x1d\xad\x66\x47\xaa\xd9\x51\x6a\x76\x84\x9a\x3d\x9d\x46\x22\xd3\xb2\xe6\xd9\xfd\x9c\x45\x9b\x03\xdb\xf0\xf1\x95\x0b\x75\x53\x69\xdd\x50\x96\xfa\x97\x8e\x8a\x69\x9e\xf9\x15\xb6\x55\xba\x68\xdc\xf3\xac\xe9\x50\x3d\xb6\x56\x1f\xa2\x20\x5b\x3c\xf0\x24\x09\xd6\x13\x7f\xef\x87\xda\x9b\x70\x1f\x29\x0e\x73\xa2\x9e\xe2\x58\x22\x0f\x1e\xb9\x72\x47\x1d\x24\xe4\x27\x1b\x1d\x18\xdb\x2a\x85\x28\x8e\xd4\xdd\x3d\x57\x06\xc8\x77\x48\x6f\xf1\xd8\xb4\x9e\x62\xff\x7d\x52\x05\xa5\xb2\x92\xad\xa4\x77\x79\x24\xc1\x2d\x55\xb1\xdc\x53\x4e\x7c\x84\x22\x36\x0c\x84\x06\x69\xa5\xa4\x61\xea\xcf\xbf\xdd\x8a\xb7\xdc\x9a\xc3\x86\x09\xbf\xff\xf4\xf0\xc3\xcb\x84\xa7\xd9\xcb\x87\x1f\x5e\x16\xa8\xdf\x89\x3a\x26\x9e\xe6\x0d\x1c\x9b\xb1\x88\xb9\x9e\x24\x82\xdb\x8b\x56\xf1\xde\xf0\xbf\x19\xff\x92\xf5\x8f\x28\x23\xff\xe4\xf6\x8c\xe1\x5f\x0c\x7e\x6b\x76\x6f\x1f\x94\xd5\x9a\x62\x09\x7b\xcc\xc7\xbc\x38\x56\xef\x58\xf2\x79\x1d\x3f\x46\xb0\x0e\xd2\
x7d\xc8\x94\x3a\x9c\x7f\xc9\xc4\x31\x4e\x0c\xcc\xc1\x00\x33\xaa\x96\xab\x38\xba\x0f\x83\x55\x36\xa0\xc2\x39\x86\x2f\x4f\x6f\xb7\x2c\xe9\xaf\xe4\x31\xa4\x85\xee\x61\xe0\x37\x77\x21\x8b\x3e\x0f\xfc\x7b\x18\x6f\xd2\x2b\x16\xf1\x7e\xa5\xa2\x99\xb3\x72\xfe\xe5\xc3\x4b\x88\xf1\xf2\xd1\x76\xe0\x53\xc2\x07\x8b\x74\x56\x0c\xd1\x27\x45\xb5\xf2\x3b\x7b\xde\x63\xc3\x27\xd8\x1c\xab\x9a\xe4\x43\xde\x8d\x2f\x00\x84\x71\xac\xa4\x8e\x2a\x8e\x34\x6c\x62\xbb\x99\x82\x34\xff\xfc\x0c\x11\x2b\xbb\xf0\x26\x79\xb7\xb8\xbe\x98\x2d\x5b\x10\xcb\xec\xfa\xe7\xd3\xc5\xc7\xcb\x23\xb8\x9e\x7d\x1c\xf6\x27\x18\xbf\x2b\x1c\x6b\x5e\x31\x52\xa0\x78\xff\xc8\xcf\xae\x67\x1f\xfb\x56\xb1\x20\x0b\x07\x36\x46\xa3\x81\xd0\xd9\xf8\xb3\x81\x36\x6d\xf4\xd2\x42\xfe\x97\x85\x6a\xfb\x90\x75\x29\xee\xe6\x8f\xb2\xfb\xfb\x06\x72\xbe\x78\xb8\x5d\x7f\x7f\x19\x59\x91\x90\x43\x77\x4c\x5c\x6b\x15\x5e\x89\x1f\xb7\x8c\x57\x23\x79\xb6\x10\xab\xad\xc9\x5a\x9a\x0d\xed\x19\x2e\x96\xda\xf1\x7c\x78\xc7\xf2\x8b\x6f\x86\xcc\xb7\x86\xeb\xb5\x68\x4d\x35\xbc\x46\x0c\x09\xc6\x24\x51\x7b\xd1\xae\xbd\xc1\xe5\x8a\x97\x0f\x1b\x63\xf1\x8d\x61\x74\xaa\x76\x00\x88\x55\x41\xa9\x2f\x15\xb3\x49\x36\xc7\xf0\x59\xd9\x1c\xda\xd9\xc5\x53\x9f\xa5\x2f\x62\x83\xb3\xa9\x7d\xbb\x8a\xd7\x18\xd9\x23\xba\x7e\xe7\x6a\xb6\x8a\x96\xd0\xf8\x1d\x5e\x2c\x4e\xe7\x58\xc4\xfc\xed\xe2\x7c\x71\x7d\x04\xbf\x7c\xba\x9e\xfd\x7a\x04\x37\xcb\xd9\xf2\xff\x67\xef\xfd\x9e\xdb\xc6\xb1\x3c\xde\xf7\xfb\x57\x9c\x87\xad\xca\x4c\x95\xec\xa4\xb7\x67\x66\x6b\xe7\x4d\x89\xd5\x1d\xd7\xd8\x96\xaf\xad\x4c\x77\xaa\x6b\x2b\xa6\x25\xd8\xe2\x0d\x45\x6a\x49\xca\x8e\x67\x6b\xff\xf7\x5b\x00\xf8\x0b\x24\x48\xe2\x1c\x80\x72\x7a\x1b\x78\x19\x4f\x5a\x38\x04\x41\x10\x04\xce\xc1\xe7\x7b\x6e\xc7\xcf\xa7\x99\xf9\x9f\x4e\x3a\x0d\x32\xa8\x22\xda\x63\xf0\x3b\xd1\x60\x83\xdf\x89\x3b\x1a\xf8\x5d\xf9\xde\x9b\xbc\xac\xe3\x07\x6e\xda\x72\x0c\x63\x93\x0a\x0c\x8b\xa2\x88\x5d\x59\xb5\xba\x92\x07\xe2\xc3\xcc\xe4\xad\x22\x84\x51\xf1\xa1\x53\xa3\x70\x29\x82\x98\xa4\xbd\x59\x46\x9d\x6c\x9e\x14\xf5\xa4\xed\xb6\x18\xf9\
x3d\x06\x3a\x8c\xd8\x23\x8b\x37\x2b\xb6\xdb\x47\x41\x6e\x30\x97\xa1\x63\xb6\xad\xf1\x77\xa1\x5c\xcf\xa0\xbe\x3a\xe1\xcc\x45\x58\x91\x57\x2d\xf7\x69\x7c\x5e\x8f\x83\x1d\xff\xf3\xae\x76\xb4\x98\xf9\xd5\x3b\x00\x89\x18\xdd\xc5\xc9\x9c\x30\x83\x6c\x9b\x1c\xa2\x8d\x08\x67\x60\x82\xd4\x02\x3c\x10\xcc\xe2\x3e\x89\x8a\x98\x57\xb1\x96\x13\xf1\x8f\xbb\x7f\xfb\x9f\x28\xb8\x67\xd1\x17\xde\x25\xff\x6b\x18\xfd\x6c\x10\xee\x29\xcb\x92\xe8\x89\x95\x7c\x9e\xb0\xf5\xe6\x4d\x26\x27\xdd\x53\x18\x9f\x2c\x77\x61\x3c\xc7\x63\xa6\x76\x0f\xfe\xb2\x73\x4d\xec\xc3\x57\x97\xc8\x51\xf2\xcc\x52\xb8\x4f\x0e\x12\x53\xa9\x23\xa3\x46\xbd\xf9\xc0\xdf\x3d\x16\xaf\x5f\x8a\x75\x41\x98\x55\xcf\x7e\x26\x8e\x21\x31\xde\x4e\xb6\x81\xfb\x97\x22\xb0\xf2\x62\xfa\xf4\x85\x68\x53\x18\x87\xbb\xc3\xae\x11\xfe\x94\xd1\x9a\x52\x23\x46\x44\x7f\x8d\x03\x60\xc5\x0a\x58\xfa\xff\x7e\x4a\x52\x60\xdf\x02\xde\xc4\x19\x84\x0f\x0d\x88\x35\x83\xfd\xe1\x3e\x0a\x33\x33\x17\x19\x40\x12\xaf\x19\xb0\x27\x6e\xf4\x87\x77\xbc\xc9\x87\x9c\x65\xb3\x22\xc2\x18\xc6\x5f\x68\x8c\x43\xe3\x9d\x29\xa1\xcf\xda\xfa\x29\x9c\xe7\xf0\x2c\x7e\x10\x27\x39\xec\x82\xaf\xa6\x3d\x10\x67\xac\xf6\x7a\xca\xd3\x51\xe1\x63\xf1\xd8\x83\x5c\x1c\xaa\x93\xd7\xa8\x60\x61\x83\xf3\x6e\xfb\x28\xc9\x57\x7c\x54\x4f\x3e\xfa\xaf\x8b\x2b\x61\x6a\x92\x3e\x39\xe5\x85\x0c\x0f\x97\xe8\x3c\x05\xd5\xbb\x50\x3d\x49\xde\x4f\xfc\x65\x30\xa4\xe9\xea\x65\x76\x77\x41\x78\x7d\xb1\x5c\x7d\x59\x7d\xbe\x46\xaf\x0a\x01\x2e\xce\xaf\x16\x62\x3d\xf8\xe1\x1f\x8b\xb3\x2f\xf3\x9b\xc5\xbc\xfe\x7f\xef\xe7\x37\x33\xf8\xb8\x98\xaf\x2e\xe7\xd7\x26\x30\x83\x69\xb8\xf2\x44\xdf\x60\xa3\x9a\xbc\xbd\x46\x3f\x6c\xde\x12\xaa\xc2\xfb\xb9\x49\x6c\xf1\xa4\xec\x98\xf1\x8d\x8a\xba\xcc\x30\x7f\x2d\x8c\xd5\x27\x86\x23\x23\x93\xbf\x1d\xf8\xeb\xf5\x2c\x85\x8b\x93\xa5\xfc\xe3\x21\xa6\x65\xe1\x1a\x46\xce\xe7\xe2\x25\x7b\x48\x93\x9d\x78\x61\x6e\xf3\x60\xfd\x75\x93\x86\x4f\x2c\x2d\x34\xff\x32\x98\x5f\x9f\x1b\xc9\xf4\x21\x45\x26\x72\x92\xa4\x1f\x29\x05\x7e\xef\xf3\x46\xc9\x34\x3a\x7d\xe8\xc4\x36\x28\x03\x41\xd6\x84\x7d\
x90\x06\x3b\x96\xb3\x34\x53\x23\x72\xc6\x31\x4c\xdc\xc1\x6c\xd3\xfd\x42\x59\x4e\x0a\x51\x2d\xc3\x9f\x53\x72\xd3\x07\x58\xb5\x1a\x59\x08\x23\x09\x34\x6a\x25\xa4\x44\x47\x93\x0e\x26\x5a\x93\x94\xb1\xf5\xfe\x05\x36\xec\x21\x38\x44\xf9\xac\x50\x9e\x79\x26\x27\x74\x29\x17\x86\x35\x19\xfa\x49\x28\x14\x56\xe7\xa1\x6b\xd6\x1f\x65\x77\xc7\xf7\x4b\x7b\xf5\x7c\xa8\x98\x0c\x6b\x84\xfa\x29\x64\xcf\xb8\xc6\x26\xf5\x62\x16\x83\x2a\x50\x86\x2d\xd8\x08\xed\xc8\xe2\x22\x2b\x8a\x9d\xe8\x8e\x2c\x06\xd2\x3b\x48\x8b\x75\xc6\xf7\x6c\x50\x80\x07\x6d\xb6\x29\xd8\xa3\x97\xe1\x41\x9b\x9c\x5c\xb6\x47\x16\x13\xf1\x1e\xb4\xd1\x52\xad\xae\x5f\xc2\x87\xd0\x1d\x95\xe4\xcf\xa0\x90\x0f\xda\x70\x2d\xfc\xd3\x27\xe7\x83\x36\xd9\x92\xff\xd1\x88\xfa\xa0\x4d\x0e\xaa\xe0\x10\xb3\x8d\x81\x6b\x29\x20\x59\xcc\x04\x81\xd0\x66\x8b\x20\xc7\xb8\x2c\x10\xe1\xe5\x65\xb1\x8b\x3c\x58\x36\x12\x41\x45\x4b\x9c\xcd\xbc\x16\x72\x41\x2d\x7b\x13\xaf\x25\x2c\xc5\x82\x64\x31\x92\x0c\xc2\x8f\x8b\xa4\xa5\xb8\xd9\x16\x0e\xa2\x4d\xb6\x3a\x02\xa5\x21\x1f\x44\x9c\xc0\x9a\x72\x43\x3d\x22\x42\xf8\x8f\xa5\x46\x74\x48\x23\x25\x44\x9c\xcb\x5b\xd2\x43\x7d\x82\x42\xa4\x0f\x67\x57\x80\xa8\xf5\xe1\xa0\x0c\x86\x71\x40\x03\x6d\x75\x44\x8c\x88\x34\x1e\x46\x25\x89\x88\xe3\xe0\x74\x40\x98\x88\x3a\xb4\xa4\x90\x51\x9f\x3c\x11\xe5\x1b\xa1\xdc\xbd\x46\xa4\x88\x7c\xf3\x7a\x74\x44\x4a\x15\xa1\x8d\xba\x96\x36\x92\xc5\x48\xe0\x08\x6d\xd5\x31\x97\x52\x34\xd5\x39\x9d\x22\x4b\x57\x1c\xa9\x2d\x79\x64\xb5\xf4\xb2\x97\x48\x6a\x34\x73\x50\x28\x09\x3f\xed\xe9\x16\x6f\x1d\x2f\x35\xda\x6c\x53\x5e\xa9\x57\x34\x89\x6a\xb5\x2d\xb2\xd4\x92\x4e\xa2\x9a\xed\x95\x5a\xd2\x09\x28\x51\x2f\xa2\x0a\x2e\xe9\x65\x94\xa8\xb6\x55\xd9\x25\xbd\x98\x92\x03\xdb\xef\xfe\xda\x95\x8f\xaa\x25\x95\xe8\x0f\x95\x2c\xc1\x24\x0b\x3e\xb5\x18\x55\x8e\xa9\x55\x17\x9d\xfe\x8a\x28\xcd\xd4\xae\x8a\xcc\xd6\x45\x94\x69\x6a\x57\xc5\x27\xd0\x22\x4b\x36\xe9\x2a\xe3\xd3\x5f\x59\xca\x37\xb5\x4c\xd0\xd3\x70\x59\x4b\x39\x0d\x18\xc1\x25\xc2\xb2\x96\x75\xea\x37\x82\x4c\xc9\xe5\x46\xe2\xa9\x65\xc9\x2e\xbf\x17\
x51\xee\x49\x16\x8a\xe8\x93\x2c\x4e\xa4\x9f\x64\x99\x44\x00\x4a\x96\x29\x64\xa0\x64\x31\x10\x83\xa2\x78\x68\x46\x25\xa1\xf0\x0b\x7b\x29\x21\x35\x2e\x0c\x85\xdf\x86\x54\x42\x52\xfd\xf2\x50\xd4\x1d\x53\x25\x27\xa5\x11\x89\x42\xdb\xd4\x89\x4a\x29\x52\x51\x84\x2d\x48\xaf\xb4\x54\x21\x18\x85\xef\xcd\x09\x05\xa6\x8a\x46\x8f\xc9\x4c\x91\x37\x8d\x43\x62\x53\xf8\x15\x7e\x4b\x9c\x4a\x23\x39\x45\x78\xb3\x34\x12\x55\xe3\xc2\x53\xe8\xeb\xa8\x1a\x50\x3a\xf9\x29\x7c\x77\xf4\xc8\x55\x39\x30\xdd\x27\x5a\x55\x4d\x44\xd4\xb7\xd7\x91\x74\x95\x2c\x06\x02\x56\x96\x1e\x4a\x8d\x8c\x15\x25\x1c\xd0\xff\x9d\x69\x8a\x59\x59\x8d\x5d\xa2\xcb\xde\x5a\x02\xab\x69\x84\x2a\x84\x25\x0b\x5a\x0e\x4b\x16\x8b\x90\x81\x9d\x34\x96\x2c\x74\x81\x2c\x59\xdc\x45\x3c\xc8\x62\x59\x2d\x6b\x13\xc7\x3b\xac\xa4\xb2\x64\x31\x10\xcc\x42\x8f\x04\x29\xb0\x35\x20\x9b\x85\x9f\xf1\x3a\x41\x8e\x5e\xf1\x2c\xb4\xed\x3e\xb1\xad\x96\x84\x16\xda\x6e\xaf\xe4\x56\xaf\x90\x16\x31\xa8\xa4\x0a\x6f\x75\xe5\xb4\x08\x4e\x5a\x8d\xfc\x56\x4b\x54\x8b\xb6\x3c\xec\x11\xe1\x52\xa5\xb5\xf0\x1f\xef\x41\x29\x2e\xa4\x40\x64\x61\xd2\xa9\x20\x97\x2c\xaf\x13\xf5\x21\x0a\x5e\xc9\x32\x94\x84\xa2\x3e\x90\x43\x78\x35\xe2\x31\xa1\x2e\x27\x91\xaa\xae\x5c\x17\x75\xad\x55\xc7\x68\x34\xa2\x5d\x94\x91\xe0\x38\x67\x84\x2c\xee\x33\x47\x14\xed\x1d\xcb\x1f\x41\x3e\x3a\xe5\x4e\xf6\x4b\x96\x29\xc4\xbf\x64\x71\x26\x01\x26\x0b\xcd\x2f\x4d\x90\x03\x53\xab\xe2\x44\xc1\xd4\xba\x28\x69\x30\xb5\x2a\x45\x20\x4c\xb5\x40\x90\x09\x53\x0d\x50\x7c\xea\x78\xc9\xb0\x56\x4d\x4a\x0c\x80\x22\x1f\xa6\xd6\xa5\x38\xf2\x49\x52\x62\x6a\x65\x0b\x4f\x3c\x5d\x56\x4c\xb5\x60\xe7\x87\xb7\x91\x18\xeb\xb7\x81\xf7\x7d\xdb\xc8\x8d\xf5\xda\x20\xf8\xe0\x6d\xa5\xc7\xb4\x6d\x31\x13\x20\xab\xcb\x03\x8a\xbd\x00\x07\xd2\xce\x85\x32\x0d\x86\x09\x80\x61\x1a\xfc\xb7\xfa\x88\x6d\x71\x3f\xe6\xaa\x55\x4f\x3f\xbe\x95\x55\x8c\x64\xa9\x1a\xbd\x20\x4e\x11\x6f\x58\x9c\xcb\x23\xca\x82\xb7\x14\xeb\xac\xd2\xc3\x57\x7a\x8c\x90\x47\x94\xf9\x27\xb6\xd4\xce\xe2\xdf\x6b\x01\xf6\x98\x7f\xfe\xf6\x9a\x2c\xdc\xc7\x67\x22\x74\
xb9\xc0\x29\xd6\xa6\xd8\xe0\x5b\xb6\x4d\x1d\x87\x32\x2d\x39\xdc\x07\xe2\xe4\x61\x13\x43\x40\x25\x92\x07\x0b\x82\x60\x13\xa6\x6c\x8d\x45\x5f\xc0\xa9\x3f\xe7\xac\x6c\xc2\xf7\xe4\xc8\xd1\x3d\xe7\xaa\xa1\xd6\x07\x57\x3f\x4a\x27\xcc\x21\x63\x05\x1e\x23\x06\x02\x65\xab\x92\xb1\x88\xad\xf3\x6e\x1e\x92\x7d\x90\xf1\xbf\xd2\xe4\xf0\x88\x8f\xed\x08\xca\x5f\x0e\xc1\x2e\xfd\x7a\x76\x7e\xb3\x90\xdf\x71\x34\xfd\xda\x2c\xab\xe5\xf5\x0c\xde\x2f\x57\xab\xe5\xe5\x71\x8e\x8d\x68\xdb\x8d\xb6\xb2\x5a\x8e\x23\xa8\xed\x3a\xf2\x2e\x51\xd5\xe2\xc3\xae\x1e\x7d\xb4\x57\x33\x8c\x73\xf6\x88\xde\xff\x16\x7a\x72\xbc\xf6\xdf\xfe\x62\xf7\x5e\x5f\x35\xef\xc1\xe6\x75\xe1\x6f\xcb\x2e\x88\x5f\xd4\x51\x8e\x0f\x81\x05\x51\x24\xdf\xbb\xe6\xcb\xd1\x1c\xec\x28\x8b\xc5\x4b\x7b\xc9\xf2\xed\x6b\x92\x57\x37\xcd\x66\x7c\xef\x33\xa8\xd2\x58\xeb\x59\xf4\xae\x78\x04\x5f\x76\xc2\xde\x5d\x23\xbc\x4b\x99\x4a\x85\x13\x54\xf5\x62\x4b\xff\x1d\x8b\x71\xd9\x7c\x2a\x8b\xfb\x34\x11\xc9\x86\xea\x5c\xc1\x0d\x05\x14\x82\x63\xa6\x41\x28\xec\x83\x82\xb4\xb2\x7c\x27\x0a\x52\xa9\xe9\x8b\xb3\x3e\x49\x7a\xb9\x58\x7d\x5c\x9e\xb5\xa4\x15\xe5\xbf\xc9\x03\xa5\xe5\xff\x99\xff\x8a\xff\x6e\x94\x75\xcf\x6b\x3b\xe2\x40\x69\xf1\xf7\xc5\x7c\xb5\xb8\x5d\x1d\xe7\x8b\xd2\xbd\x4f\xaa\x09\x92\x2f\xa2\xee\x45\x72\x55\x82\xdb\xa5\xee\x73\x6a\x55\xf9\x88\x8c\x6b\x67\x9a\x34\x34\xc7\xdf\x93\xe8\x92\xe1\x7c\x2f\x7b\x12\xcb\xb6\xa9\x51\xc6\xfd\x3e\x7a\x81\xa0\xe8\x75\xf2\xe1\xa8\xe0\x21\x67\x29\xdc\x35\xaa\x37\x67\x67\x8f\x46\x1b\x17\x8f\x46\x7b\x34\xba\x2a\x1e\x8d\xf6\x68\xb4\x47\xa3\xfb\xcb\xef\x0a\x8d\xd6\x7d\xb5\x3d\x23\xed\x19\x69\xcf\x48\x7b\x46\xda\x33\xd2\x9e\x91\xf6\x8c\xb4\x67\xa4\x3d\x23\xed\x19\x69\xcf\x48\x9b\x56\xf5\x8c\xb4\x67\xa4\x3d\x23\xad\x2f\x9e\x91\x1e\x28\x9e\x91\xf6\x8c\xb4\x67\xa4\xcb\x46\x7b\x46\xda\x33\xd2\xcd\xe2\x19\x69\xcf\x48\xe3\x8d\x78\x46\xda\x33\xd2\xe6\x81\x0f\x0f\x4b\x7b\x58\xda\xc3\xd2\x1e\x96\xf6\xb0\xb4\x87\xa5\xc1\xc3\xd2\x1e\x96\x1e\xad\xea\x61\x69\x42\x4d\x0f\x4b\x1b\x55\xf6\xb0\xb4\x87\xa5\xc7\xdb\x82\x81\xa5\xdb\
xe9\xea\x6e\xf8\x42\xec\xf5\x72\xd6\x89\xcb\x63\x4d\x4d\xb1\x17\xc4\x34\x44\xd9\xe0\x5d\x6b\xd2\xd6\x05\x20\xd6\xb7\x70\xcf\xf2\x67\xc6\xcc\x3d\xd5\xf9\x73\xa2\xa1\x6f\x8d\xbd\x3b\x94\x13\xea\x1b\x16\x27\xbb\x30\x0e\xf2\xe4\x15\xf8\xea\xb3\xfa\xe2\x14\x23\x93\x8d\x03\x5a\xbb\x3a\x71\xa1\x46\xdf\x96\xcb\x67\x31\x2e\x30\x0b\x1b\x6c\x9a\x42\xc0\xa7\x2a\x04\x1b\xb8\x81\x86\xc2\x00\x7d\x08\x81\xab\xd4\x85\x70\xec\xa1\x44\x6f\xa6\x51\x4a\x43\xab\x7d\x98\x36\xad\x21\xda\x62\x3b\x0d\xa2\x36\xb5\x21\xde\x6a\x37\x15\x22\x35\xbd\x21\x58\x0c\x75\xb0\x67\x79\xc0\xd6\x33\xec\x98\xe9\x81\x89\xb8\x1e\x18\x63\x7b\x68\x16\x0b\x1e\xa8\x87\xef\x21\xd9\x6c\x30\x41\xc7\x62\x7c\x60\x9c\xf3\x21\xd9\x5c\x6d\xc3\x6c\x0a\xd6\x07\xc6\x78\x1f\x92\xc5\x9a\x11\xea\x61\x7e\x48\x56\x1b\x9c\x50\x1f\xf7\x43\xb2\x5b\xb3\x42\x2e\x51\x17\xe8\x44\xcb\x74\xfc\x0f\x6d\x80\x35\xe7\xf3\x36\x03\x64\x35\x04\xd4\x23\xc2\x2a\x07\x44\x32\x3c\x08\x53\x21\x16\xcf\xcd\xe2\x24\x06\xe9\x82\x21\x02\xe7\x33\xbe\x35\x4b\x04\xaf\xb8\xee\x71\x82\x15\xc1\x64\x68\x11\x4c\x82\x17\x81\x01\x62\x44\xfe\x78\x0d\x60\x46\x64\x9b\x75\xb0\x4a\x87\x1a\xd1\x26\xd0\x0e\x9e\xd4\xc2\x8d\x88\xbd\xea\x14\x51\x82\x31\x4c\x89\x66\xb1\xfe\x58\xea\x62\x55\x24\x9b\x4a\x7c\x6b\x28\x5e\x45\x6b\xb1\x44\x9c\x06\x91\x25\xda\xe0\x6a\xde\xbb\x06\x5b\xa2\xb5\x56\x71\x99\xb4\xd1\x25\xda\x27\xa9\x1d\x3d\xb3\x8a\xf8\x81\x44\xd2\xfb\xc7\x2a\x29\x8a\x06\xba\x48\x9a\xc0\x9e\x1a\xf1\x34\x92\xd5\x7e\xf4\x89\x3a\xa9\x4c\x80\x3f\xc1\x54\x08\x14\x4c\x88\x41\xc1\x38\x0a\x45\xb2\x79\xcf\x26\xc1\xa1\x60\x1a\x24\x0a\x46\xb1\x28\xea\x9c\x28\x64\xe1\x7a\xd1\x28\x92\xd5\x06\x4e\xd5\x87\x47\xd9\xd8\x2d\x90\xaa\x51\x44\xca\xe6\x1a\x4d\xac\x4a\x8f\x49\xd9\x58\x57\xd1\x2a\x3d\x2a\xe5\xc8\xfe\x5f\xdf\xe9\xec\xbf\xb3\xb3\xaf\x41\xac\x9c\x21\x53\x40\x8c\x4a\x83\x25\x3a\x05\x96\xf8\x14\xd8\x21\x54\x60\x87\x51\x81\x1d\x4a\x05\xd6\x38\x15\xd8\x22\x55\x5d\x03\x94\xe8\xa9\xc6\x0a\x25\x90\x0b\xae\xf0\x2a\x70\x85\x58\xf5\x1a\xc2\xc6\x53\xfb\x0c\xa1\x83\xbb\x7d\x86\xd0\x11\x5e\x70\x8a\
x5c\x81\x3b\xec\x0a\x6c\xd1\x2b\xb0\xc2\xaf\xc0\x2d\x82\x05\x26\x18\x16\xe9\xab\x50\xa2\x5b\x43\x28\x96\xf5\x9a\xf2\x54\x8f\x63\xd1\x1c\xa9\x4d\x84\xab\x7d\x44\x8f\xb6\x36\xd3\x60\x5c\x2a\x96\x45\x75\xac\xf4\xa1\x5c\x0d\x34\x8b\x64\xba\x17\xe7\xb2\xe9\x86\x21\xa4\x4b\x78\x5b\xa8\x8e\x90\x5e\xac\xab\x81\x69\xd9\x6e\xaf\x9b\x68\x57\x03\xd5\xa2\x35\x79\x1a\xbc\x0b\x26\x41\xbc\x60\x0c\xf3\xa2\xef\x34\x59\x3f\xea\x45\x7b\x5c\x25\x1e\xa6\xc7\xbd\x48\x36\xdd\x23\x62\x30\x0d\x26\x06\xd3\xa2\x62\x30\x09\x2e\x06\x63\xc8\x18\x79\x1c\xcc\x7a\xb1\x31\x5a\xcf\xd6\xbd\xaa\x43\xc7\xec\x9d\x77\xce\xfd\x2d\x4d\xe4\x4c\x0d\xdf\xd0\x2c\x52\x43\x3e\x8e\xd0\x33\x70\x84\x9f\x01\x1d\x41\x03\xfb\xd0\x93\x0b\x14\x0d\x1c\xe0\x68\xe0\x3c\x8a\x66\x89\xa5\xc1\x2b\xc6\xd0\x1c\x10\x6a\x30\x11\xa5\x06\xe3\xa4\x1a\xd1\xc5\x3f\x46\xab\xd1\x43\xda\x82\x70\xeb\x25\xd6\xc8\x31\xa4\x9a\x72\xd3\x50\x6b\xb4\x9d\x89\x63\xd2\x0d\xa6\xa3\xdd\xc0\x80\x78\x23\x19\x2d\x97\xde\x7d\xd4\x1b\xc9\xa8\x42\xca\x75\xc9\x37\xda\xc3\x6a\xd1\x72\x5a\xfa\x8d\xe6\x90\x57\x88\x39\x5d\x54\x91\x78\x7a\x88\x0d\x45\x15\x1d\x84\xd1\xac\xc9\x39\x98\x8e\x9e\x83\x71\x82\x8e\x1c\x4d\x9f\x88\xa2\x83\x51\x92\xce\x87\xea\x5c\x87\xea\x54\xb2\x8e\x3a\xe3\x2a\x07\x9e\xf4\x74\x9d\x9b\xe6\x4e\x13\xa5\x3b\x6a\x0c\x84\x4c\xe7\x81\x25\xa1\x07\x76\x94\x1e\x38\x21\xf5\xc0\x05\xad\x07\x56\xc4\x1e\x58\x51\x7b\x60\x47\xee\x81\x25\xbd\x07\x56\x04\x1f\xd8\x52\x7c\xe0\x82\xe4\xeb\x1a\xa1\x07\x81\xec\x89\x3e\x70\x44\xf5\xf5\xd9\xa1\xc5\x5b\xec\xe9\xbe\x1e\x3b\xc4\xf8\x8f\x1b\xca\x4f\xd3\x26\x6c\x5a\x54\x20\xa5\x46\x05\xa7\x0a\x30\xe8\xd4\x94\x60\x9c\x26\x15\xdd\xa1\xc7\x4a\xab\x0a\x26\xa9\x55\xd1\x16\xeb\x54\xac\xb6\xe9\x55\xe3\xc3\x8e\x6f\x42\x5e\x83\xf9\xbb\x2a\x2f\x4d\x31\x31\x99\xab\x85\xd2\xaa\x4e\x00\xb2\xea\x55\x4f\xfb\x75\xca\x1f\x83\xf6\xab\x86\x91\x67\xfd\x3c\xeb\xe7\x59\x3f\xcf\xfa\x79\xd6\xcf\xb3\x7e\x55\xf1\xac\x9f\x67\xfd\x86\xcb\xef\x93\xf5\xd3\xad\x7a\x3c\xe9\xe7\x49\x3f\x4f\xfa\x79\xd2\xcf\x93\x7e\x9e\xf4\xf3\xe1\x43\x4f\xfa\x79\xd2\xcf\
x93\x7e\xe6\xf6\x3d\xe9\x47\xa9\xef\x49\x3f\x4f\xfa\x79\xd2\x8f\x66\xc8\x93\x7e\x4a\xf1\xa4\x9f\x27\xfd\x3c\xe9\xe7\x49\x3f\x4f\xfa\x79\xd2\xcf\x93\x7e\x9e\xf4\xf3\xa4\xdf\xb8\x21\x4f\xfa\x79\xd2\xcf\x61\x04\xcd\x73\x7e\x9e\xf3\xf3\x9c\x9f\xe7\xfc\x3c\xe7\xe7\x39\x3f\xcf\xf9\x79\xce\xaf\xbf\xb9\x9e\xf3\xf3\x9c\x9f\x6a\xc4\x73\x7e\xd4\xda\x9e\xf3\xeb\xd8\xf1\x9c\xdf\xb0\x1d\xcf\xf9\x79\xce\xef\x3b\xe5\xfc\xf6\xe1\xfa\x6b\xdb\xe1\x70\x7c\xe4\xef\x5a\xd3\x0a\x8a\xb5\xc9\xdc\x2f\x96\x0d\x54\x07\x71\x10\x7f\xe5\xbb\xe1\xfb\x40\x1c\x2c\xee\x64\x73\xc4\xac\xab\xa8\xf8\xd2\x26\x4c\xd9\x9a\x4e\xe9\xb9\x98\x0d\xce\xca\x26\x50\xcd\x1c\xf5\x61\x57\xad\x25\x38\xd9\xd4\x03\xea\x1f\xa5\x5b\xec\x90\xb1\x02\xd8\x13\xa3\x01\x3f\xa9\x24\x90\xb1\x88\xad\x95\x03\xa4\x72\xaa\xd9\x07\x19\xff\x2b\x4d\x0e\x8f\x78\x3f\x08\x6f\x53\x31\x0e\xab\x33\x78\xd5\x79\xba\xb3\xf3\x9b\x85\x5c\x31\x7c\xba\xba\xbd\x5e\x7c\x38\xff\xe9\x7c\x71\x86\x9f\xbe\x56\xcb\xeb\x19\xbc\x5f\xae\x56\xcb\x4b\xec\x31\x2a\x5a\x0a\x6f\x6d\xbb\xd1\x56\x56\xcb\x6b\x74\x1d\x79\x97\xa8\x6a\xf1\x61\x57\x8f\x3e\xda\xfb\x19\xc6\x39\x7b\x44\x7f\x6a\xf9\x2e\x3f\xc8\x45\xed\xbf\xfd\xc5\xee\xe5\xbe\x6a\xde\x83\xcd\xeb\xc2\xdf\x96\x5d\x10\xbf\xa8\xa3\x1c\x1f\xb9\x0d\xa2\x48\xbe\x77\xcd\x97\xa3\x39\xd8\x51\x16\x8b\x97\xf6\x92\xe5\x5b\x3c\xf7\xe9\x6e\x0a\xbd\x69\x36\xe3\x77\x31\x8d\x2a\x2d\xb6\x9e\x4a\xef\x8a\xe7\xf0\x65\x27\xec\xdd\x35\xce\x10\x50\xe6\x53\xe1\x4f\x56\x03\x0b\xd2\xf5\xc9\x62\x4a\x0c\x9e\x0f\xb6\x34\x11\x87\x05\x6a\x3a\xe7\x79\x1b\xae\xb7\x25\x98\x89\x36\xd9\x40\x91\xf6\x41\x01\x73\x5a\xbe\x18\x05\xcc\xd8\x74\x43\xb6\xa7\x7d\xb4\xcd\xcb\xc5\xea\xe3\xf2\x4c\xf9\x46\x94\xff\x26\x4e\x5f\x57\xff\x67\xfe\x2b\xfe\xe3\x51\xd6\x3d\xaf\xed\xdc\x7e\xba\xac\xfe\xbe\x98\xaf\x16\xb7\xab\xe3\x7c\x56\xba\xf7\x49\x35\x41\x70\x89\x9c\x34\x7a\x91\x5c\x15\xed\x05\x3a\x69\xf4\x39\xb5\xaa\x7c\x44\xc6\xb5\x25\xc3\x1b\xa4\x2f\x73\x9a\xb8\x84\x9b\x2d\xca\xad\xa6\x15\xdf\xd5\x16\xc5\xb2\x81\x6a\x14\x78\xbf\x8f\x5e\x20\x28\xba\xbe\x79\x3c\x0f\xf5\
xcc\x83\x87\x9c\xa5\xb5\xce\x09\x9f\x9e\xf9\xcc\x75\xc8\x71\xce\x63\xb2\x18\x89\x9d\x3e\x83\xbb\xef\xb4\x9d\x2e\xc3\x14\x9a\x0c\x83\x7a\x0c\x95\xb6\x02\xd1\xd3\xd1\xd6\x62\x68\xea\x2a\x50\x16\x6e\xc7\xd0\x61\x18\xd1\x60\x10\x7a\x0a\x68\xa3\x13\xe8\x2f\x0c\x6a\x2f\x34\x74\x14\xd0\x86\xf5\xba\x0b\x4d\x0d\x05\xb4\xc9\x1e\xcd\x85\x86\x7e\x02\xda\xe4\xa0\x3c\x40\x79\x50\x0c\xff\xf0\xf5\x5a\x0b\x76\xc1\xae\xb6\xce\x82\x5e\x33\x01\xbf\x5e\x53\x34\x16\x5c\x0a\x50\x14\xa7\x0e\xad\xb5\x12\xec\x75\x12\xdc\xcd\xbc\xd6\xfa\x08\xaf\xf3\xdd\x76\xa2\x8d\x30\x8d\x2e\x82\x7b\x4d\x84\x31\x3d\x04\xd2\xa9\x93\x01\x2d\x04\x55\xd7\x00\xff\xe9\xd4\xe8\x20\x68\x34\x0d\x88\x33\x7b\xa9\x81\xe0\x5a\xcf\x60\x50\xcb\xc0\x66\xd3\xac\xd3\x31\x50\xcf\x0f\xa1\xad\x0e\x68\x18\x94\x7a\x04\xf8\x0f\xc7\x80\x7e\x81\x8d\x60\x9a\x56\xbb\xa0\x58\x54\x51\x87\x56\x5b\xb7\xc0\x52\xc6\xa1\xa5\x59\xa0\xd3\x1f\x20\xdf\xbc\x38\xb1\xd4\xa3\x3d\x80\x36\xda\x38\xb3\xd4\xaf\x3b\x40\x98\x5e\xa6\x38\xca\x34\xc9\x31\xa6\xa9\x8e\x30\x8d\xe8\x0c\x50\x3c\x51\x53\x68\x0c\x4c\xa0\x2f\x30\xac\x2d\x40\x76\x70\xf5\xea\x0a\x34\x35\x02\xa8\x56\xdb\x9a\x02\x2d\x7d\x00\xaa\xd9\x5e\x3d\x01\x9d\x36\x00\xf5\x22\xaa\x96\x80\x5e\x17\x80\x6a\x5b\xd5\x11\xd0\x6b\x02\x38\xb0\xfd\xee\xaf\x5d\x65\x84\x5a\x0f\x80\xfe\x50\x2d\xf5\x03\x68\x1e\x4a\xba\x6e\x80\x8d\x66\x80\x85\x5e\x80\x85\x56\x80\x85\x4e\x80\x9d\x46\x80\x95\x3e\x80\xbd\x36\x80\x03\x5d\x00\x27\x9a\x00\x4e\xf4\x00\x9c\x68\x01\x38\xd1\x01\x70\xa2\x01\xe0\x8e\xff\x77\xc4\xfe\x5b\x71\xff\x74\xe6\xdf\x21\xef\x3f\xca\xfa\x57\xdc\x3e\xde\x5f\xd7\xcf\xf9\xab\xcc\x3e\xfe\xc4\x83\x8e\xf1\x57\x79\x7d\xd2\x49\x8d\x16\x20\xda\x61\xf5\xf1\x0b\xfb\x26\xdb\x3f\xc4\xe9\xe3\xb7\x21\x15\xd7\xdf\xcf\xe8\x53\x77\x4c\x43\x7c\x3e\xda\xa6\xf0\x18\x0c\xb1\xf9\x84\x2d\x48\xc9\xf2\xf7\x71\xf9\xf8\xde\xac\x38\xfe\xa9\x98\x7c\xf7\x3c\xfe\x20\x8b\x5f\x78\x27\xf0\x2b\xfc\x3e\x0e\xbf\x66\xea\x09\x6f\x96\x86\xc1\x9f\x80\xa7\x9f\x80\xa5\x9f\x90\xa3\x77\xcf\xd0\x0f\xf2\xf3\x92\x85\xa7\x7a\xb1\xbb\x53\x63\xdd\x2b\x96\
x1e\x4a\xc9\xcd\xdb\x81\x76\xbd\x3e\x83\x36\xff\x6e\x35\x76\xc9\x62\xc7\x4e\xb8\x77\x17\xcc\x3b\x91\x77\xb7\x02\xc4\xed\x39\x77\x5b\xc6\xdd\x5d\xfc\xc3\x92\x6d\x7f\x9d\xe8\x87\x03\xae\x7d\x0a\xa6\x7d\x84\x67\x07\x3c\xcc\x3d\xc6\xb2\x37\xb8\x74\xb4\xed\x3e\x8e\xbd\xc5\xa4\xa3\xed\x76\x19\x76\xe7\x3c\xfa\x44\x2c\xfa\x18\x87\x4e\x5f\x2c\xf6\x30\xe8\x2a\x4f\x8e\xff\x94\xb7\xf9\xf3\x0e\x4b\x4e\x8b\x31\xb5\xd8\xf3\x16\x47\xfe\x3b\x89\x01\x59\xf1\xe3\x13\xb1\xe3\x23\xdc\xb8\xb3\xb8\x95\x03\x66\x7c\x98\x17\x97\x01\x13\xca\x48\xe8\x0d\xb0\x34\x02\x26\x6e\x17\x4b\xb6\x07\x1e\x74\x01\x96\xb2\x4b\xc8\xc7\xaa\x5a\x7c\x78\x9b\xf5\x26\x84\x31\x74\x6c\xb8\x86\xf3\xc6\xbf\x0a\x55\x6c\xc5\xc9\xb1\x17\x9a\x97\x9a\xcc\x76\xdb\x70\xdd\x16\x4c\xb7\x3d\xcf\x6d\xcd\x72\xd3\x39\x6e\x3a\xc3\x6d\xc1\x6f\xdb\xb0\xdb\x74\x6e\xdb\x8a\xd9\xb6\xe6\xb5\xed\x59\x6d\x17\x9c\xb6\x0b\x46\xdb\x05\x9f\xed\x82\xcd\x76\xc1\x65\xbb\x62\xb2\x2d\x78\xec\x5c\xdd\x17\x5d\x04\xf1\xe3\x21\x78\x64\xa6\xf3\x26\x61\x77\xd8\xda\x15\xae\xf4\x0d\x30\x34\xa6\xee\xad\x24\x70\x5c\x1d\x08\x7b\x60\xb9\x0a\xd1\x98\x7d\x41\x0e\x71\x98\x2f\x9f\x58\x9a\x86\x9b\x23\xf6\xc3\xa7\xc6\x55\x29\x37\xcf\x97\xb9\xbc\xe5\x7c\x61\x26\x96\x52\x85\x8b\x50\x2e\xa2\x44\x57\x20\x16\xd4\x6d\x51\xc5\x38\x89\xa5\xf7\xa5\x38\xdb\x2a\xae\xf4\x1c\x22\x96\xfd\x49\x71\x6f\x10\xc4\x2f\xb2\xba\x70\x94\x07\x6b\xc1\x0f\xc5\x02\x39\x46\x36\x52\x9e\x30\xe6\x37\x2e\x69\x41\xbe\x58\xa8\x5c\x77\x81\xfc\xfb\xb7\x3b\x7e\xad\x3b\x73\xb8\x3e\x65\x0f\x5f\x9e\x7e\x7c\x9b\xb2\x2c\x7f\xfb\xf4\xe3\xdb\x12\x67\x3f\x95\x0b\xe8\xb3\xa2\xcb\x13\x0c\x7b\x5f\x78\xc0\x62\xb8\xbb\x6c\x19\xb9\x1b\x1a\x91\xf9\x36\x65\xd9\x36\x19\x8d\x8d\x99\x46\xc2\xda\x2f\x5e\x65\x7e\xb0\x56\x6b\x90\x15\x75\x20\x0a\x63\xbe\x66\x4f\x83\xe7\x18\xb6\x49\x1a\xfe\x8b\x8f\x36\xbe\x37\x96\x3e\xf0\x91\xbe\xe1\x4f\x66\xbd\x0d\xd2\x11\x5f\x02\xc1\xf5\x87\x77\xf4\x19\xb9\xf5\x10\xdc\x0e\xcd\x4f\x65\xf8\x34\x70\x00\xca\x3a\x89\xcc\x92\x26\x23\xa7\xaf\xf6\xc1\x66\x7e\x19\x4c\x35\x62\xcf\x88\xeb\x18\x3a\xe0\
xba\x07\x8e\xb3\x3c\xc8\x99\xec\x12\xe1\x89\x12\xf3\x58\xf5\x8e\x99\x7d\x1a\x44\x0b\x4a\xb7\xb8\xc0\x78\xe5\xf4\x1a\x40\xd1\x58\xe2\x79\xae\x0f\xcb\x8b\xe5\x8d\xca\x28\xfe\x7c\xb3\xf8\x3c\x83\xf7\x17\x9f\x16\xe2\xef\xc5\xd5\x0c\x3e\x2f\x2e\x2e\x96\xbf\x98\xed\x2d\x97\x37\x7c\x09\x20\x4e\x0f\x99\x9c\xee\x31\xdd\x2b\x9d\x74\x9b\x6a\x54\x8b\xdf\x8d\xd1\x0f\xf9\x0d\x9b\x5a\x5c\x98\xac\x93\x4e\x8a\x6e\x33\xfa\xa9\xec\x35\xa3\x9f\xde\x18\xdc\x39\x4a\xd9\xc2\xee\x1d\xc4\x28\x58\xd8\xbe\x87\x58\xfd\x89\xee\xbb\x58\x75\x4c\xf1\x26\x32\x58\x1f\x52\x11\x31\x45\xbe\x90\x55\x53\xcc\x5e\x4a\x23\x9b\xc3\x02\x13\x30\x7f\xbf\xfc\xe7\x62\x06\xef\x17\x17\xcb\x5f\xdc\xbe\x5a\x54\x65\x88\x13\xd9\x26\xb3\xf7\x6b\x61\xf2\x2e\x44\xc1\x3d\x8b\xa6\x1f\xb4\x17\xfc\x32\xd8\x01\x34\x97\xad\xab\x46\x0e\x62\xc4\x88\x27\x6b\x7e\x5b\xf1\xc1\xf0\x38\x44\x29\x56\xb1\x49\x0e\x66\x83\xac\xd5\x0f\xff\xe4\xed\xc2\xf6\xc3\xaa\x99\x65\x53\xed\x88\xfa\xbf\x19\x0d\xf8\x6c\x9b\x1c\xa2\x8d\x80\x03\x65\xa4\xb7\x04\x3e\xe2\x20\x0f\x9f\x18\x64\xeb\x20\xaa\xae\x22\xd7\xc1\x83\xeb\xd6\x70\xc7\xb2\x6d\xf8\x90\x9f\x1d\x52\x03\x40\xda\x78\x08\x69\x36\x8e\xca\x65\x30\xcb\x58\x06\x9b\xa2\x56\x83\x0f\xcd\xf6\x51\xf0\x02\x41\xa1\x68\x10\x66\xa3\x93\xa9\x5c\xc2\xc2\xbc\x51\x43\xfe\x1b\x64\xe1\xee\x10\xe5\x41\xcc\x92\x43\x16\xbd\xf0\x1e\x7e\xce\xcc\x32\x8a\x3e\xa4\xc9\x0e\xf2\xe7\x84\x9b\x08\xa3\x20\x3d\x89\x58\xfc\x98\x17\xdb\x58\xe9\x6c\xcd\xe0\x4f\xec\xf4\xf1\x74\x06\xcf\x8c\x7d\x3d\xe1\x1b\xab\x13\xfe\xd7\x88\x61\xf9\xe0\xb2\x3f\x9f\xaa\xb7\x5f\xfa\x6d\xf7\x49\x16\xf2\xa7\x2d\x8f\xe3\x87\x39\xac\x47\x91\xc8\x24\x8e\xda\xaa\xbf\xe2\xee\x0b\x6a\x44\xec\x42\xc5\x01\xae\xe4\x01\x2e\xce\xaf\x16\xb0\x8f\x46\x69\x21\x3e\x1a\x86\x06\xd7\xb7\xf9\xb7\xd0\x68\x3f\x64\xb0\x56\x6f\x0d\xa8\x5f\xb9\x69\xb3\x0a\xa8\x0f\xe7\xb8\xdd\xce\xe0\xac\x17\xf9\x6d\xaa\xf6\x57\x08\xbe\x85\x23\x8e\x0c\xf3\x2d\x82\xe1\x3c\x8f\x9a\xe5\x49\x73\x7c\xa7\x07\xe4\x1c\x5f\xcc\x39\xe3\xb7\x0c\x72\x92\x9a\xf6\x56\x6e\xf9\x25\xcc\x2b\xe1\x07\x89\xb8\x80\xd1\
xa2\xaa\xbb\xa4\xe2\x7d\x24\x3b\xe1\x14\xde\xbf\xf0\x99\x3c\x38\x44\xf9\x0c\x02\xb1\x55\x0f\x4c\x3e\x63\x72\x9e\x2f\x90\x79\x8d\xe2\xd6\xed\x87\xf9\xc5\x02\xab\xb6\xc5\x5f\xfc\xf9\xcd\x0c\x2e\x96\x3f\xff\xf0\x6e\x7c\xbd\x64\xb6\x5a\x3a\xe9\xb6\xc5\xa0\x8e\x6c\x8a\xc9\x0f\x79\x5b\x07\x7e\xf7\x32\xdd\x24\xf4\x79\xa2\x49\x68\xdc\x2e\x62\x12\xfa\xec\x27\x21\x7d\xf9\x9d\x4f\x42\x9f\xfd\x24\x24\xca\x77\x3d\x09\x3d\x87\x9b\x7c\xab\x6f\xdd\xb8\xe0\xde\xb8\xac\x5e\x6b\xb8\xfd\xc2\xaf\xa6\xfd\x61\xe7\x6d\x11\x0d\xab\xcf\x85\x44\x6c\x06\x3b\x16\x64\x87\x54\x6e\x28\x1e\xd3\x70\x03\xd9\x7f\x1f\x82\x54\x1f\x88\xf9\x76\x9d\xf4\x4c\x15\x13\xdc\xd6\xaf\xd7\x89\x7e\x36\xec\xdc\xd5\xbf\x58\x9a\x9c\x84\xf1\x86\x7d\x63\x9b\x62\x91\x2c\x19\xfb\xf2\x2e\xdb\xf7\xd6\xf3\x68\x53\x16\xc9\xdd\x54\x31\x87\x46\xec\x21\x07\xb6\x79\x64\xf5\xf1\xfe\x50\xbb\x85\x7d\x39\x66\xbf\x7c\x7e\xf5\x7e\xc9\x93\xfd\x40\xb7\x88\x56\xfe\x3f\xed\x2e\xe8\xcc\xa4\x6d\x05\xc8\x40\x39\x96\xa4\xdc\xcb\xf9\x6e\x77\xc8\x83\xfb\x88\xc9\x7d\x51\x75\xe2\x9b\xd7\x2c\xdb\xb0\x29\xa7\xcf\x53\xe5\x1a\x5f\x0f\xf7\x2c\x8d\x59\xce\xb2\x93\xb0\xb4\xd2\x0a\x51\x14\xe1\x23\x7c\x93\xaf\x65\xc5\xbe\x56\x17\xdf\x67\xfe\x13\x08\xab\x33\x79\x15\x0b\x80\x6d\x66\x79\xf9\x94\x09\x70\x61\xad\x7e\xb4\x4f\x2a\xc3\x7f\x87\x0f\x51\x72\xd8\x94\xff\x77\x17\xc4\xc1\x23\x4b\xdf\x76\x1b\x5b\x04\xbd\xfe\x2e\xba\x51\xf9\xf7\x7d\x90\xb2\x38\x6f\x5d\x3f\x4d\x9e\xe5\x37\xa9\xdb\x51\x9d\x25\x54\x5b\x77\xb2\xac\xaa\xfb\x49\xeb\xeb\xa7\xfb\x6d\xa7\x5b\xd7\x49\x2c\x54\x97\xc2\xac\x90\x8c\x2a\x72\xd5\x89\xd3\x53\x7c\x2f\xbf\x0f\xd6\x6c\xc3\x9b\x9c\x95\x0a\x10\x7c\xea\x7b\x64\xad\x03\xac\xe2\x9c\x7f\x9a\x06\xf1\x23\xdb\x28\x21\xb2\xd3\x4e\x4b\xd7\x49\xfc\x10\x85\xeb\xbc\xd5\xeb\x7c\xe8\x77\xda\x7b\x02\xbb\xc6\x37\x5c\xf9\x0f\xeb\x24\x3a\xec\xe2\xce\x7f\xe8\x5b\x89\xf1\x3b\x68\x4f\x2c\xfd\xb1\xc4\x6e\xb7\xb7\xdf\xe8\x4e\x4f\x8a\x2e\x4a\x1e\xaa\x1e\xad\x9d\x2c\xed\x79\xce\x28\xd0\x67\x12\xda\xeb\x09\xe6\x0d\xae\xc6\x47\x47\x8b\xe6\x5e\xc7\xd6\xb7\xcf\x2c\x7c\xdc\
xe6\xc7\xfb\x4a\x8b\xcb\x99\x4d\xdc\xd5\x84\x2b\xdb\x28\xe7\x8e\x30\xe3\x4f\xeb\xb4\x7c\x6c\xe5\x7f\xeb\xdd\x32\x94\x4e\xb3\x60\xf3\xff\x1d\x32\x79\x36\x78\x5b\x99\x93\xcf\x5d\xba\x10\xb3\x75\xca\x58\x0c\x7f\x6a\x4c\xf3\x3d\x26\xf7\x8c\xa5\xd9\x9f\x4f\xe1\xe7\x94\x05\xa5\x54\x9f\x6c\xc6\x0c\x1e\x1b\xff\xb6\x6d\x34\x5b\x36\xb6\xd7\x4f\x57\x37\x40\x1c\x62\x48\x76\x61\x9e\x0b\xc1\x89\xda\x71\xfa\x43\x25\x11\xf7\xbc\xe5\x1f\xad\x94\xc5\x1b\x26\x44\xdc\xf4\x4b\x2f\xfe\x9a\x0f\x3d\xd6\xfe\x30\x7c\x77\x61\xd5\x99\x32\x7a\x9f\x59\xe9\x9b\x2c\x1a\xa0\x9f\x57\xa4\xd7\xb6\xf7\x91\xf1\xe7\x3b\xd0\x30\xa3\x28\xbb\x79\x74\x7d\x30\xaa\x6e\xb0\x39\x36\x7a\x29\x87\xfa\xd0\x6c\x07\x7a\x1f\x05\xf1\xd7\xa1\xa5\xbe\xe1\x3e\xbe\xf5\x70\xdf\x73\xb3\x26\x3f\x37\xb8\xbb\x31\x5b\xad\xe0\x88\xb8\x23\xf9\x91\x1a\xf4\xa3\xf6\x7f\x78\xda\xe5\x04\xbe\xc9\xed\xe1\xe0\x6f\xb2\x75\x92\xb2\x75\x90\x0e\x1d\xdf\x39\x81\x9c\x7d\x1b\x36\x13\x25\x8f\xd9\x75\x10\x0f\xec\xd6\xab\x5f\xb8\x7f\x6c\x17\xa3\x17\xc7\x3d\x3a\x73\x7b\xdf\xdf\x83\xb8\x1f\x19\x76\xa6\xfe\x1d\xb3\x44\x2f\x08\x8f\x08\x25\x91\x4b\xeb\x1d\x91\x6d\x92\x07\xd0\xd6\xdb\x24\xc9\x58\x56\x88\x61\x47\xc9\x23\xb0\x38\x37\x92\x61\x12\x9c\x70\x7e\x48\xe3\x53\xb8\x65\x0c\x7e\x9b\x6f\x9e\x82\x98\x2f\x0d\xf9\x63\x87\xff\xf7\x20\xd5\x3f\xfb\x0f\x9e\x45\xc9\xe3\x63\x18\x3f\xbe\xdd\x24\xeb\xec\xed\x53\xc8\x9e\xdf\x06\x85\x85\x93\xff\x96\x95\xff\x3c\x1e\xd2\x5c\xc6\xd1\x4b\xb3\xd5\xf2\xa6\x76\x81\x38\xf9\x58\x29\xca\x8b\xc5\xa8\xf9\x39\xfa\x79\x0c\xe2\x4b\x50\xd6\x16\xe6\x84\x6a\x64\xd4\xbc\xd6\xb0\xa1\x72\x93\xc0\x37\x5f\x86\xe7\xab\x4c\xd4\x1c\xda\xeb\xd0\xe6\x55\x30\xa3\x60\xb5\x95\x7b\x3c\xb1\x42\x2d\x1e\x45\x9d\xcb\x46\xea\x09\x46\x91\x89\x4c\x33\x9f\x90\xf8\x92\xed\x14\x3e\xc8\x43\x0c\x82\x83\x8f\x5e\xea\x54\x38\xbc\xf7\xb3\xc3\x7e\x9f\xa4\x39\xdb\xf0\xa5\xc8\xb8\x07\xaa\x3c\x6b\x59\xee\x2d\xa4\x7e\x6b\xe1\x50\x2b\xf7\xca\xdb\x24\xcb\xcb\xab\x8c\x3d\x55\x12\x46\x4b\xc1\x66\x0d\x31\x59\x9a\x0f\x94\x50\x4b\xbf\xa7\xd5\x17\xd2\x4e\x57\x5f\xfa\xf6\xbf\
xcd\x52\x4d\xd5\xee\x3f\x65\xb7\x06\x5f\x01\xcc\xa7\xcc\xc4\x5e\x6b\x96\xad\xee\x0e\xb2\xc3\x6e\x17\xa4\xe1\xbf\x04\xac\xda\x12\xe9\x3b\xea\x22\xc5\xfa\xab\x67\xb2\x40\x81\x4a\xc0\x77\xb8\xc1\xad\xa3\xf7\x0e\xbe\xb5\x8f\xc1\xe1\x91\xfd\x33\x64\xcf\x53\x1c\x67\x2d\xd4\x72\xca\x4b\x98\x56\xc3\x8c\x2d\x53\xe3\xca\x40\xfb\x85\x4f\x8c\x92\x04\x96\xfb\xbd\x72\xd4\xe5\x89\x38\x77\x01\x81\xec\x97\xd1\x77\xd6\xe0\x40\x32\xee\xf8\x6d\x94\x3c\xb3\xf4\x7d\x72\x88\x8d\xe4\xde\x8f\x7c\xe6\xe8\xa2\x6a\x9c\x41\xdd\x6e\x64\x8c\xd7\x86\x7b\x5e\xbd\x3e\x4c\x2b\xba\xd9\xac\x1b\x65\xe9\x1c\x61\x2a\x0e\xcf\xc8\xf3\x48\x41\xf4\x1c\xbc\x64\x70\xcf\xca\xed\xbf\x91\x4d\xa1\x28\x9a\xa4\x35\xec\xc8\x9b\x36\xde\x9c\xc3\x7e\xff\xfd\x3e\xab\x4f\x55\xe3\x28\xcf\x4a\xdc\xda\xd1\x9e\x55\xc4\x44\x4e\x23\x43\x3d\x75\xdc\x83\xca\xf6\x41\xfa\x55\xcc\xf2\xd3\x4e\x72\xb7\xca\x75\xa6\x98\xe9\x50\x57\xc0\x4e\x77\xa2\x9b\xdc\x4c\x77\x26\x5f\x32\x90\xdb\xce\xea\x8e\x56\x2f\xfb\xb1\x31\x8e\x99\x45\x77\x61\x3c\xc7\x27\xcf\xb0\x3b\x98\x7a\xd9\xb9\xa6\xed\x24\x99\xc4\x8a\xba\x03\xef\x57\x16\xaf\xcd\xa4\xeb\x8a\xd3\x98\xf2\x85\x0b\x77\xfb\x88\xf1\x76\xb1\x0d\xdc\xbf\x14\xb8\x75\x25\x8a\xb6\x0b\xe3\x70\x67\xa8\x50\x52\x0b\x30\xc8\x63\x85\x65\x26\x40\x71\x74\xba\xb9\x4c\x43\xa4\xf1\xfc\x89\xbf\xd0\xdf\x02\xde\xc6\x19\x84\x0f\x8d\x6c\x1b\x19\xec\x0f\xf7\x51\x98\x6d\x19\xef\x8c\x35\x03\xf6\x34\xbc\xe8\xa9\xcb\x0f\xef\xf8\x7d\x1d\x72\x96\x41\x98\xc3\xb3\x98\x70\xe2\x84\xef\x72\xbf\xf2\x36\xc6\x19\xab\x69\xbf\xc0\x68\x8e\x2c\x6e\x5e\x36\x2d\xc8\x85\x6c\x99\xbc\x44\x95\x7c\x24\x93\x79\x36\x10\x59\x1c\xc2\x0c\x12\xf1\xf4\x83\x48\x04\x86\xd8\xb7\x30\xcb\x33\xb9\x15\x0c\x32\x08\x60\x1b\x9a\x88\xa7\xa8\x2f\xd2\xf4\x63\xfd\x16\xf3\xe2\x82\x9b\x09\x4f\xbd\x26\x09\x23\x50\xd3\xef\xf2\xa6\xf0\x0f\x93\xe8\x3c\xf9\xa2\x18\x0a\x0e\x8a\x39\xb3\xf0\x9c\xcb\x57\x8c\x37\x50\x77\xf0\xe4\x7a\x7e\xf3\x8f\x2f\x1f\x3e\xce\x6f\x56\x5f\x56\x9f\xaf\xd1\x67\x50\xa0\xb0\x70\x71\x7e\xb5\x98\x15\x7f\xbf\x9f\xdf\xb8\x45\x08\x86\x1a\x89\x30\xc0\xdb\
x88\xf8\xf9\xfb\x91\x03\x2d\xa6\x24\xa3\x85\x1f\xc8\x98\x9f\xeb\x1e\x5a\xaa\x5b\x57\x9f\x05\xaf\xc4\x30\xf3\x92\x1c\x1b\xed\x8e\x62\x59\x54\x7f\x90\x1f\xc3\xa7\xae\xfc\xdb\x9b\x37\x15\xea\x22\x06\xd6\xf8\x7c\xf0\x93\xcc\x02\x13\xac\x73\xbe\x4e\x92\x4b\xb0\x6f\xb3\xd6\xb5\x42\x29\x79\x04\x9b\x20\x36\xc9\xb5\x29\x59\xb8\xf0\x01\xbe\xf1\x9a\xd5\x92\x4d\x59\x8e\x95\xc6\xea\xfe\x19\xf7\xe9\x6c\x83\x1c\xf2\x34\x7c\x7c\x64\x29\x5f\x0c\x46\xc9\xf3\x8c\xdb\xac\xc3\x77\xed\x6b\x8c\x9a\x6c\xb7\xa1\x75\x8d\xe0\x3e\x79\x62\xa7\x70\x2b\xcf\xca\x47\x2f\xfc\xa3\x33\xee\x17\x12\x7a\x92\xbc\xe6\x5b\xd1\x46\x08\xe0\x39\x48\x63\xf9\x0d\xd5\x5e\x67\xd4\x64\xc3\xda\xac\x96\xfd\xeb\x3e\x9f\xf2\x3a\x66\x43\xea\x04\x0e\xb1\x78\x38\xdf\xa4\x14\xfd\xfe\x90\x8b\x4f\x60\xe3\x51\x4b\x4b\xa7\xf0\xa7\x33\xb3\x27\x9f\xa7\x87\xdd\x3e\x2b\xdb\x71\xfa\x67\x80\x79\x26\xd4\x56\xca\x8f\xb6\xc8\x3b\xb5\x61\x29\x5f\x47\x1a\xf8\x7c\x64\x11\xe7\xfd\x85\xbb\x39\x89\xa2\xe4\x59\x88\x94\x26\x87\xb4\xf9\xe2\xc3\xff\x54\x00\x0e\xfc\xe7\xbb\x99\x90\x28\xca\xd9\x63\x92\xbe\x8c\xcf\x6c\x6f\xde\x9c\xcd\xaf\x7e\x5e\xdc\xbc\x79\x33\x13\xb7\x20\x1e\xca\xdf\xe1\xcd\x1b\xc1\x3d\xf1\x7f\xfd\xdf\x99\x72\x81\xff\x40\x5f\xe0\x97\xf9\xcd\xd5\xf9\xd5\xcf\x43\x57\x68\x5e\xe0\x07\x57\x77\x20\x31\xb2\x8e\xfd\x7f\x77\x76\x03\xf5\x05\xc4\x1e\x2e\xfe\xbb\x19\x86\x02\x3d\xb3\xc2\x0f\xef\x8a\x05\x98\x90\x9d\x54\xc7\xf8\xf8\x96\x2f\x00\xd9\x0d\x72\xd4\xce\xca\xfc\x32\xca\xfc\xf0\xc3\x3b\xb8\x3f\xe4\x88\x7d\x64\xb3\x79\xff\xfe\x0e\x02\x28\xba\xa2\x75\x11\xbe\x3e\x12\x1a\xb5\xf7\x2c\x7f\x66\x6c\xdc\x2e\xb7\x15\x6f\xe0\x3f\xf8\xff\xc0\xf2\x1f\x43\x6d\x2e\x9b\x60\x12\x33\xfa\x8f\xd6\xfd\xc1\x7f\x6a\xda\xcc\x2f\x6c\xf8\x9c\x7a\x27\x57\x61\xb7\xd9\xdf\xa7\x63\x6b\x8d\xef\x33\x46\x60\x98\x2d\x93\xb4\x2c\x35\x5e\x36\x60\x33\x3f\x1a\xa3\xf7\x04\xed\x10\x22\x7e\x6f\xdd\x45\x18\xfe\x7e\x2a\x02\x1f\xc5\xe0\x1b\x5a\x2c\x57\xf8\x46\x14\xbe\xa1\xcd\x82\xd5\xc7\x72\xf8\x18\xd5\x32\x1a\x8b\x8f\xa0\xf1\x11\x3c\x3e\x86\xc8\x47\x31\xf9\x28\x2a\xdf\x94\xcb\
x47\x92\xf9\xf6\xaf\x28\x86\xce\xb7\x7e\x4d\xb1\x78\xfe\xa4\x80\xfe\x34\x88\xbe\x73\x48\x1f\xf7\xde\x51\x41\x7d\x0c\xaa\x6f\x0e\xeb\x23\x70\x7d\xfb\x91\x6c\x8a\xec\xbb\x83\xf6\x11\xd8\x3e\x3a\x30\x43\x08\xcd\x90\xe1\xfd\xc9\xf0\x7d\x1c\xc0\x6f\x68\x72\x1c\xf3\xef\x68\xc7\x4d\x17\x8f\x59\x19\x47\xca\x81\x3c\x7d\xa2\x2e\xa1\x3c\xc9\xda\x13\x29\x53\x2f\x88\x41\x2e\x7c\xe6\xad\xf3\x0e\x06\xce\x8e\x3c\x28\x04\x00\xb6\x0c\x6e\xf3\x60\xfd\x75\x93\x86\x4f\x2c\x2d\xe1\x7d\x98\x5f\x9f\xbb\x8c\x50\xe7\x2d\x19\x74\x73\x6f\xb3\x71\x0a\xfb\xde\x47\x69\x74\x70\x0f\x1c\x3d\x4f\xc2\x65\x95\x67\x2c\x6b\xc1\x3e\x48\x83\x1d\xcb\x59\x9a\x91\xf5\x05\x4d\x23\x6a\x20\xbe\x00\x0f\xa6\xad\xc5\xe6\xa5\x6f\x64\xcf\xc1\x69\x1c\x1a\x3f\x78\xd0\xe4\x9e\x47\xa7\xec\x99\xe2\xd9\xe3\x5b\xa1\x0c\x85\x26\xd7\x2a\x08\x84\xe0\x99\x94\x72\xb8\x0c\x8b\xd5\xa9\x20\x3f\x89\x98\x6e\x95\x32\xa3\x4e\x69\x6c\x6c\x73\x77\x88\xf2\x70\xdf\x92\xf1\xe7\xd3\x51\x9d\x92\xe6\x29\x64\x82\xca\x31\xb6\x59\xe5\x72\x37\xac\x81\x1d\x89\xd0\x0c\x45\x9a\x07\x78\xab\xe6\xd1\xf2\x53\xb4\x47\x26\x3a\xda\x5b\x96\xce\x47\x5d\xa3\x8d\x5d\x6a\x62\xe3\xa4\xe7\x8b\x20\x6c\x19\x90\x9c\xf1\x8f\x7a\x91\xff\x7e\x26\xdd\xd3\x05\xba\x82\xb2\x2a\x20\xac\xa7\x70\xc3\x1a\xb1\xd9\xb8\xca\xa1\xf0\x5b\x63\xe0\xfc\xd7\x6f\xc5\x61\xe5\x5a\x21\xf3\xf4\xe9\xc7\xd3\xfa\x75\xfa\x2f\xd4\x85\x05\x44\x27\xbc\xcb\x99\xa0\xc1\xee\xa3\x64\xfd\x35\x2b\x13\x38\x15\xd1\x56\x94\xa4\x28\xc8\x3c\xbd\x9b\x24\x66\x70\xcf\x1e\x12\x99\x9e\x5b\x93\xe6\x1e\x65\x51\x4d\x43\x5d\x1e\xb5\x95\x5a\xa3\xe8\x95\x99\x2c\x95\xaa\x7d\x0e\x11\x0b\xb2\x1c\xfe\xf6\xae\x7c\x92\x02\x4c\x0a\x1a\x6d\xc6\x0d\x91\x22\xf3\xbe\x50\x4e\x97\x1e\x40\x47\x49\xf8\xd5\xd4\x3d\xe5\x47\x0b\x64\x2c\xaa\x94\x66\x47\x59\xac\xe7\x39\xa1\x27\xab\x79\x4c\x58\x8b\x8d\x14\xc2\xc5\xb6\xa2\x34\x44\x16\xb8\xd7\x64\x3d\xb6\x4c\x62\xb4\xee\xa4\xcf\x3f\xfe\xec\xd6\x4d\xe1\x4f\xb1\x35\xcd\x27\xb8\xdb\x36\x64\x8a\x9d\xae\xf3\xa2\x4a\xef\x50\x67\x16\x31\x3d\x0d\x50\x96\x7b\x56\xc5\x82\x8b\x6f\x70\x37\x33\x0e\x2e\xc7\
x4e\x37\xbf\xce\x0c\x9e\xb7\xac\x98\xb3\xca\x9d\x20\xca\xa4\xc8\xf7\xd2\x38\x55\x54\x6c\xfa\xea\xdc\x32\x84\x19\x45\xe4\x01\x7c\x38\xc4\xeb\x12\xe7\x2f\xbf\x0c\x41\x94\xb2\x60\xf3\x52\x66\xe6\x40\x19\x2d\x42\x26\x61\x3b\xd5\x7b\x9d\x38\xe4\x6a\x54\x18\x4c\x2d\xbc\x55\x45\xe6\x8d\x66\xfa\x18\x75\xee\xc6\x7e\x17\x1d\xa7\x8e\x19\x4b\x1b\x23\xfe\x95\x9a\x3a\x66\x30\x6d\xcc\x4d\xf9\x02\xa0\x3f\x7c\x2f\x62\x9e\x53\x8f\x7f\x89\xe0\x6e\x50\x2f\x21\x51\x46\x3b\x89\x67\xb4\xe9\xff\x91\x0b\x89\xee\xf8\xd1\xa6\xfe\x47\x59\x7d\x08\x53\x35\xf5\x8c\xa3\xb4\xff\xee\x53\xfe\x9b\xa4\xfb\xb7\xf8\x94\x0e\x67\xa2\xc1\xce\xa2\xfd\x69\xfe\xd5\xb4\xfd\x28\xbb\xda\x14\xff\x7d\x29\xfb\x71\xaf\xd5\x48\x0a\x1a\xac\x9b\x5a\x96\xd2\x59\xed\x3e\x55\xff\x04\x69\xfa\x27\x4f\xd1\x3f\x55\x7a\xfe\xa9\x52\xf3\x4f\x9a\x96\xdf\x45\x4a\x7e\x6c\xa2\x23\x6a\x2a\x7e\x6a\x1a\x7e\x62\x0a\x7e\x62\xfa\x7d\x62\xea\x7d\x7a\xda\x7d\x72\xca\x7d\xbb\x74\xfb\x96\xa9\xf6\xad\xd3\xec\x5b\xa7\xd8\xb7\x4e\xaf\x6f\x9d\x5a\xdf\x3a\xad\xbe\x9b\x94\xfa\x0e\xd2\xe9\x93\x53\xe9\xd3\xd2\xe8\x3b\x4a\xa1\x3f\x51\xfa\xfc\x69\x52\xe7\x1b\xa4\xcd\x47\x2f\x65\xc7\x52\xe6\x97\x29\xf0\x51\x46\x47\xd3\xe5\x37\xd2\xdf\xa3\x0c\xf7\xa6\xca\x6f\xa6\xbe\x47\x59\xec\x49\x93\xaf\x4d\x7b\x8f\x5c\xcd\xd7\x29\xf2\xfb\x53\xde\x23\xdd\xb1\x32\x3d\xfe\xc4\xe9\xee\x47\x53\xdd\x93\xf2\x7c\x0f\xa5\xb9\xef\xa4\xad\x47\xee\x46\xda\x29\xee\xf5\x29\xeb\x71\x5b\xd1\xb1\xf4\xf6\xfc\x56\x68\x1b\x1c\x6d\x6a\x7b\x9b\xa4\xec\x03\x69\xed\x3b\x69\xea\x71\x86\x5b\x29\xed\xb5\x29\xea\xe9\xcf\x6a\xd6\x9b\x9e\x1e\xd7\xca\x3a\xc1\xbf\xdb\xd4\xf4\xd3\xa4\xa5\xb7\x4e\x49\x6f\x9d\x8e\xde\x36\x15\x3d\x21\x0d\x3d\x39\x87\xbb\x5d\xfa\x79\x9b\xd4\xf3\x6e\x1c\xdf\x16\x29\xe7\x27\x76\x7b\x5b\xe6\x95\x37\xc8\x29\xcf\x17\x24\x58\x0f\x68\x7f\x3e\xf9\xa6\xff\x1a\x37\xf1\x0e\xe7\x92\x2f\x72\xc3\xe3\x3e\xee\x23\x79\xe4\x25\x1e\x8d\xf6\x27\x4f\x92\x43\x7e\x82\xfc\xf1\x26\xb9\xe3\xc5\xc2\x01\xe7\xa8\x75\x9e\x37\x7e\x3c\x67\x7c\xfd\xcc\x70\x4e\xba\xbe\x7c\xf1\x4d\x27\x3e\x61\x90\x6a\x1c\
xfe\xaa\x13\xdf\xc2\xf3\xdd\xce\x13\x8f\x3b\x8d\x57\xdc\xf7\x70\x8e\xf8\x46\xce\x77\xe4\x27\x51\x9f\x1f\xde\x3a\x38\x31\xec\xa2\xa7\x45\xd0\x5b\x79\xe1\xf5\x5e\x75\xca\x04\x65\xe2\x55\xb7\x0d\x25\xeb\xbc\xea\xa4\x6e\x98\x22\xb7\xfb\x34\x79\xdd\x1d\xe6\x74\xc7\xbb\x39\x49\xb9\xdc\xa9\x79\xdc\x89\x39\xdc\xed\xf2\xb7\x5b\xe5\x6e\xa7\xe5\x6d\xa7\xe5\x6c\x27\xe6\x6b\xa7\xe6\x6a\xa7\xe5\x69\x27\xe7\x68\xb7\xca\xcf\x6e\x97\x9b\xdd\x36\x2f\xbb\x6d\x4e\x76\xdb\x7c\xec\xb6\xb9\xd8\x6d\xf3\xb0\xbb\xc8\xc1\x6e\x91\x7f\xdd\x4c\x1e\xb5\x2c\xf6\x69\xc6\x8d\x8f\x3f\x43\xff\x31\x77\xbe\x4a\xfa\xad\x3e\x14\x58\xdc\x83\x79\x8a\xed\xa7\x1f\xdf\xca\x2a\x88\x1c\xda\xf2\xbc\xe3\x86\xc5\xb9\x38\x48\xd9\x48\x45\x58\x3a\x9e\x2a\xed\x4e\xf3\xd5\x34\xff\xe4\x55\x22\x9d\x79\x82\x11\xba\xd9\x87\xeb\xaf\xed\xed\xdd\xf1\x0e\x52\x5f\x6b\xae\x8e\xb5\xe4\x70\x5f\x6b\xd1\x1c\x75\x88\x05\xf1\x57\x3e\xa0\xee\x03\x71\xf6\xaa\x79\x86\x59\xd8\x9c\xf2\x08\x32\x92\x08\x94\xc5\x8d\x97\x02\x47\x07\xb6\x4c\x4c\xfc\x18\xf1\x30\xa1\x2c\xea\xa9\xbc\x8f\xc9\x73\xa9\x2e\x25\x8f\xcc\xcb\xe7\x8c\x3c\x94\x97\xb1\x88\xad\xf3\x4e\xc4\x01\xf6\x81\x40\xe9\xd3\xe4\xf0\xb8\x45\x6f\xaa\x8a\x91\xd5\xd5\xfb\xd1\x93\x85\xb8\x90\xd1\xf2\x7a\x06\xef\x97\xab\xd5\xf2\x72\xda\x78\x3e\x1d\x48\x2c\x2d\xac\x96\xd7\xa8\xdf\xcb\xbb\x32\xae\x12\x1f\x76\xf5\xc8\xc2\xbf\x61\x43\xc9\x32\xf4\x65\x3c\x85\x86\xbe\xb4\x33\x07\x35\xdb\x4d\x1d\xfe\x7c\xf4\xef\x82\xf8\x45\x1d\xb9\x89\x04\x61\xb1\xde\xa4\xf6\x60\x47\x4e\x8d\x50\xbe\x7c\x97\x2c\xdf\xbe\x06\x69\x71\xd3\xbc\xfc\x77\x38\xe1\x29\xed\xb3\x9a\xf4\xee\x8a\x9e\xfe\xb2\x13\xb6\xee\x1a\xa1\x3d\xec\x43\x17\x61\x40\xf5\x00\xb2\x74\x4c\x49\xb5\x71\x11\x28\x4f\x36\x87\x35\xde\xf1\x23\x63\xae\x52\xfc\x5e\x48\x8c\xab\x27\x9f\xf7\xd8\x70\x50\x3b\x9c\x94\x27\x25\x9c\x40\x8d\x03\x75\x26\xe6\xcb\xc5\xea\xe3\xf2\x4c\xe5\xbd\x8b\x7f\x13\x87\xe9\x50\xd6\xcb\x8a\xf3\x5f\x6b\x23\xe7\x57\xd5\xdf\xe2\x30\x5d\xf1\x37\xdf\x9a\xdf\xae\xa6\x9d\xc8\xbb\xf7\x46\xa9\x8e\xde\x64\xd7\xbd\x40\xaa\x86\xf4\x1f\xd4\x7d\x4b\
xa9\x26\x1f\x83\x51\x4d\xc9\xd8\x04\xe9\xcb\xfc\x35\x68\xc7\x5b\xcd\xd5\x5f\x71\x91\x6e\xd1\x1c\x35\xda\xb4\xdf\x47\x2f\x10\x14\x9d\xab\x9c\x2b\x09\x1e\xcc\x17\xfe\x00\x77\x8d\xaa\xcd\xc9\xd1\x83\x86\xda\xe2\x41\x43\xc3\xe2\x41\x43\x0f\x1a\x7a\xd0\xd0\x83\x86\x7d\x1f\x3d\x4f\x1c\x7a\xe2\xd0\x13\x87\x9e\x38\x34\x2f\x9e\x38\xf4\xc4\xa1\x27\x0e\x65\xf1\xc4\x61\xc7\xae\x27\x0e\xc7\x8a\x27\x0e\x07\xab\x79\xe2\x10\x67\xc0\x13\x87\x9e\x38\xf4\xc4\xa1\x27\x0e\x3d\x71\xe8\x89\x43\x4f\x1c\x7a\xe2\xd0\x13\x87\x06\x06\x3c\x71\x38\x49\x93\xbf\x6f\xe2\x50\xe7\xff\xf6\xe8\xa1\x47\x0f\x3d\x7a\xe8\xd1\x43\xd4\x7d\x7b\xf4\xd0\xa3\x87\x1e\x3d\x34\x2d\x1e\x3d\x34\xac\xec\xd1\xc3\x76\x2d\x8f\x1e\x5a\xd5\xf7\xe8\xa1\x11\x7a\xd8\x4e\xfc\x72\xc3\x17\x35\xc7\xcf\xfe\x22\x2e\x8b\x31\xe3\x70\x6b\x64\x7a\x6d\x65\xbf\x73\xad\x49\x00\x13\x80\x58\x12\x1a\xe7\x15\x15\xdd\xf8\x9c\x68\xf8\x36\x23\x7f\x05\xf6\xd8\xeb\x86\xc5\xc9\x2e\x8c\x83\xdc\x34\x1f\xa5\x8b\xb3\xcf\x67\xf5\x45\xb1\x06\x5c\x3f\x62\x7c\x53\x3a\x41\x82\x46\x17\x96\x0b\x4c\xf1\xc8\x4d\x97\x10\x98\x9c\x3f\x80\xcb\xfb\x03\xd4\x83\xd0\xf8\x13\xf1\x40\x1b\x19\xe0\x22\x0f\x10\x1c\x69\x84\xd0\x5a\x36\x49\x7e\x20\x98\x28\x47\x10\x20\xf2\x04\x51\xb6\x5e\x98\x5c\x41\x40\x1c\xbd\x60\x77\x94\x1f\x6c\x3c\x94\x0e\x8f\xf4\x83\xd9\xb1\x7e\xa4\xc5\x3a\xf4\x90\x0d\x1e\xed\x47\x9b\x6d\xa2\x00\xfa\xe3\xfd\x68\x93\x47\xc0\x01\xc0\x10\x09\x40\x1b\x2d\x71\xb9\x7e\x2c\x80\xd0\x1d\xf2\x58\xf9\x18\x1a\x80\x36\x2c\x03\x9d\x43\x78\x00\xda\x64\x0b\x27\xd0\x20\x02\x68\x93\xfd\x48\x41\x63\xce\xc3\x3f\xfc\x5e\xac\x80\xe4\x0b\x90\x65\x00\x2d\xa0\x92\x10\x20\x73\xd9\xf7\xe1\x05\x96\x96\x5d\x20\x06\x60\x8d\x19\x80\xd3\x99\xd7\x0a\x37\x80\xe3\x2f\x29\xac\xc9\x03\x30\xa5\x0f\xf0\xc3\x23\x19\x21\x10\x68\x73\x6e\x30\x4c\x21\x10\xe7\xb1\xe4\x61\x94\x44\xc0\x7f\x33\xcb\x23\x1a\xc3\x34\x02\x71\x4a\xdf\x4c\x40\x24\xc0\x24\x54\x02\x4c\x42\x26\x80\x19\x9d\x80\xff\x62\x74\xe3\x1a\x6a\xac\x82\x38\x0e\x1a\x44\x43\x87\x52\xa0\x0e\xad\x36\xd5\xa0\x76\x01\xe5\x53\xa1\x25\x1b\x1a\x63\
x8b\x7c\xf3\x7a\xba\x41\x12\x0b\x68\xa3\xee\x09\x07\x98\x84\x72\x00\x43\xd2\x81\xb2\x00\x71\x1e\x8e\x01\x5d\x48\xa6\x43\x3c\x58\xad\xc0\x4e\xf5\xd4\x03\xad\x99\x5d\x4a\xa2\x11\x9f\xc1\x4f\x7b\x83\xa4\x44\x49\x3f\xa0\xcd\xba\xa7\x25\x60\x1a\x62\x02\x8e\x41\x4d\xc0\x84\xe4\x04\x4c\x48\x4f\xe8\x6d\x3b\x24\x28\xc0\x11\x45\x01\x84\xc8\x22\x58\xd0\x14\x60\x41\x54\x00\x9d\xaa\x00\x3a\x59\x01\x74\xba\x02\xac\x08\x0b\xb0\xa1\x2c\xba\x95\xb1\x91\x31\x8d\x05\x6c\x70\x0e\x5c\x10\x17\xe0\x82\xba\xe8\x35\x82\x89\x95\xf5\x19\x41\x05\xec\xfa\x8c\xa0\xa2\x76\xe0\x8c\xc2\x00\x37\x24\x06\xd8\xd0\x18\x40\x26\x32\xc0\x1d\x95\x01\xd3\x91\x19\x30\x19\x9d\x01\x66\x84\x06\xc5\x51\x33\x4a\x69\xe0\x17\xf6\x92\xea\x18\x27\x35\xf0\xdb\x90\x8a\xec\xe8\xa7\x35\xa8\x3b\x26\xe1\x70\xee\x21\x36\xd0\x36\x75\x84\x87\x42\x6d\x10\xb6\x20\x3d\x94\x47\x45\x6e\xe0\x7b\x73\x52\xd2\x03\x4c\x68\x0f\xf2\xa6\x71\x88\xf8\xc0\xaf\xf0\x5b\x84\x88\x86\xfa\x20\xbc\x59\x1a\x4a\x64\x9c\xfc\x40\x5f\x47\x01\x30\xb4\xf4\x07\xbe\x3b\x14\x5a\x64\x88\x00\xa1\x0c\x08\xb5\xc1\x6d\x0a\x84\xfa\xf6\xaa\x2f\xae\x4a\x82\x50\xdd\xd7\xdd\xa9\x91\xc8\xd0\x40\xc7\x43\x69\x4b\x8f\xc0\x64\x04\x09\xb8\xa0\x48\xc0\x05\x49\x02\x0e\x68\x12\xa0\x11\x25\x60\x17\x39\xb0\x25\x4b\xc0\x92\x2e\x01\xa7\x81\x0f\x0b\xca\x04\x8e\x1f\xf6\xb0\x04\x4e\xc0\x0c\x3a\x41\x0f\x88\xad\x14\xa5\xee\x07\x4f\xf0\x13\x5f\x27\xd6\xd1\x0b\x9f\xa0\x6d\x4b\x58\x65\x14\x40\x41\xdb\x15\xc0\xca\x74\x10\x0a\x4c\x03\xa2\x80\x21\x8c\x42\x5b\x25\xba\x07\x52\xc0\x08\x4a\xa1\x05\x97\x04\x80\xd1\x0b\xa6\xfc\x4e\x82\x3f\x64\xe0\x03\xc6\xa4\xa9\xe6\x24\xd0\x07\x0a\x79\x2a\x09\xb5\xf4\x82\x2a\x4e\x02\x56\x5d\x58\x85\xba\xe4\xaa\x43\x35\x1a\x60\x85\x32\x12\x7a\x23\x2b\x8d\x48\x89\xdb\x55\x92\xed\x11\x87\x5e\x1d\x29\x4a\x38\x64\x0a\xd8\x05\x26\x03\x5e\xc0\x2d\xf4\x02\x64\xf7\x34\x09\x7e\x01\x0b\x00\x06\xe8\x10\x0c\x58\x83\x30\x60\x0b\xc3\x00\x19\x88\x01\x32\x14\x03\x74\x30\x06\x2c\xe0\x18\x20\x03\x32\x60\x03\xc9\x80\x2d\x28\xd3\x35\x40\xf3\xc7\xdb\x01\x33\xe0\x00\x9a\xe9\xb3\x81\x77\
x81\xdb\xc1\x33\x3d\x36\x08\xae\x78\x7b\x88\x46\xd3\x16\x4c\x0e\x2f\x40\xe7\xf1\x02\x67\x7a\x03\xa8\x64\x4b\xf0\x7d\xe5\xf4\x82\xa9\xf2\x7a\x81\x5d\x6e\xaf\xf8\xb0\xe3\x8b\xe9\x63\xf2\x32\x57\xe5\x25\xb1\xd5\x5d\xef\xe0\xb1\x0d\xe9\x84\x6d\xaa\xce\xf3\xa4\xcc\xff\x3d\x52\xa6\x1a\x1d\x9e\x93\xf1\x9c\xcc\x68\xf1\x9c\x8c\xe7\x64\x3c\x27\xe3\x39\x19\xcf\xc9\x78\x4e\x06\xb1\xa0\xf0\x94\x8c\xa7\x64\x3c\x25\xe3\x29\x19\x4f\xc9\x78\x4a\xc6\x53\x32\x9e\x92\xf1\x94\x8c\xa7\x64\x3c\x25\x63\x5a\xd5\x53\x32\x9e\x92\xf1\x94\x8c\xbe\x78\x4a\x66\xa0\x78\x4a\xc6\x53\x32\x9e\x92\x29\x1b\xed\x29\x19\x4f\xc9\x34\x8b\xa7\x64\x3c\x25\x83\x37\xe2\x29\x19\x4f\xc9\x98\x05\x3d\x3c\x23\xe3\x19\x19\xcf\xc8\x78\x46\xc6\x33\x32\x9e\x91\x01\xcf\xc8\x78\x46\x66\xb4\xaa\x67\x64\x08\x35\x3d\x23\x63\x54\xd9\x33\x32\x9e\x91\x19\x6f\x8b\x67\x64\xfe\xb8\x8c\xcc\x3e\x5c\x7f\x6d\x6f\x6e\x8f\x87\xcb\x5c\x6b\xae\x8e\xb5\xe4\x7a\x57\x6f\xd1\x26\x75\xbc\x05\xf1\x57\x3e\xba\xee\x03\x71\xcc\xb0\x93\x24\xc8\x74\x79\x42\xe1\x05\x36\x61\xca\xd6\x34\xd2\xc5\xf6\x25\x3d\x2b\x2f\x4d\x31\x71\x8c\x67\x59\x35\xd0\x26\x5f\xee\x9b\x8f\xd2\xa1\x72\xc8\x58\x01\xbd\xc8\x87\x8d\x0c\x56\x65\x2c\x62\xeb\xbc\x93\x09\x1f\xf6\x41\xc6\xff\x4a\x93\xc3\xe3\x16\xbd\xf5\x2a\x86\x57\xe7\x80\x10\x9c\x9d\xdf\x2c\xe4\xb7\xf8\xd3\xd5\xed\xf5\xe2\xc3\xf9\x4f\xe7\x8b\x33\xdc\x4c\xb3\x5a\x5e\xcf\xe0\xfd\x72\xb5\x5a\x5e\x62\xce\x7e\xe0\xf3\x2e\x6a\xdb\x8a\xb2\xb0\x5a\x5e\xa3\x7e\x2f\xef\xca\xb8\x4a\x7c\xd8\xd5\x23\x0b\xff\x9a\x85\x71\xce\x1e\x51\x1f\x33\xbe\x37\x0d\x72\x51\xf3\x6f\x7f\xa1\xbf\x9f\x57\xcd\x76\x53\x87\x3f\x1f\xfd\xbb\x20\x7e\x51\x47\xae\x70\xa6\xe0\x33\x46\xb7\x07\x3b\x72\x7e\x84\xf2\xe5\xbb\x64\xf9\x16\xc7\x45\xb9\x99\xf1\x6e\x9a\x97\xff\x5e\x67\x3d\xa5\x91\x56\x33\xdf\x5d\xd1\xdd\x5f\x76\xc2\xd6\x5d\x23\xc6\x8a\x7d\xf2\xc2\x01\xa9\x7a\x90\xa5\xef\x8c\xc5\x79\xf4\x22\x4f\x41\x24\xe8\x80\x6a\xbe\x2d\x03\xe7\xcf\xdb\x70\xbd\xad\xb8\xa5\xc6\x71\xff\x7d\x90\xe2\x6d\xb6\xc6\x7a\xc1\xfd\x50\x03\x68\x9d\xd9\xf9\x72\xb1\xfa\xb8\x3c\x53\
xa6\xe6\xf2\xdf\xc4\xa9\x4c\x94\xf5\xb2\xe2\xfc\xd7\xda\xc8\xf9\x55\xf5\xb7\x38\x91\x59\xfc\x7d\x31\x5f\x2d\x6e\x57\xd3\xce\xe6\xdd\x7b\xa3\x54\x47\x27\x7e\xad\x7b\x81\x54\x0d\x99\xd3\xb6\xee\x5b\x4a\x35\xf9\x18\x8c\x6a\x4a\x84\x2d\x48\x5f\xe6\x78\xa4\xd9\x7e\xcd\x7e\xab\xb9\xfa\x6b\xaf\xd9\x2d\xda\xa4\x46\xdb\xf6\xfb\xe8\x05\x82\xa2\x87\x9b\x27\x85\x20\x78\xc0\xec\x3d\x2b\x60\x9e\xcf\x8e\x7c\xc2\x39\xe4\xe6\x0e\x49\x12\xdd\x4e\xa7\x83\xdd\x7c\x05\xe9\x54\xb0\x01\x11\x5c\x13\xbe\xb8\x7d\xf1\x00\x0d\xac\xd0\xbd\xd8\x0f\x57\x97\x04\x3e\x06\xd9\x6b\x42\xf5\x8a\xaf\x1d\xca\xea\x20\xd1\x5b\x92\x99\x28\x8b\x63\x34\xaf\xfc\xe4\xa1\x4c\x0e\x91\xbc\x6d\x32\x17\x37\x44\x3a\x14\xaf\x2b\x08\x55\x3d\xc3\xd2\x43\xe4\xa2\x2c\xd6\xec\xc7\x00\x8d\x8b\xdb\x7f\xd6\x2c\xcd\x00\x89\x8b\x1b\xa4\x5d\xfc\xc7\xf2\x34\x8f\x1d\x81\xeb\x66\x76\xb3\x22\x6f\x8f\xfa\xd9\xb3\xa6\x6e\x8d\x88\x5b\xac\xa3\x63\x98\xb6\x2d\xce\xa6\xe1\x86\xee\x10\x69\x5b\x91\xb3\x28\x93\x63\x94\x2d\x05\xb3\x1b\x23\x6c\xcb\xc8\x35\xca\xa8\x7b\xba\xd6\x3d\x59\xeb\xfe\x68\x85\x01\x51\x4b\x3f\x5a\x31\x78\xac\xa2\xa2\x63\xd1\x9f\xc1\x26\x49\xdb\x47\xc6\xa2\x8c\x76\x0e\x66\x68\xa9\x58\xe4\xb2\xa2\x3b\x7e\xb4\x44\x2c\xd2\x0b\x99\xaa\x47\x33\x1c\xd1\xb0\xee\x49\x58\x13\x0a\xd6\xe2\xc3\x3a\x7c\x52\x03\x3b\x8b\xf6\xd3\xaf\x2a\xcd\x8a\xb2\xab\x25\x5f\xfb\x48\x56\xdc\x6b\x35\x72\x44\xa3\x72\x83\x10\xe6\xbf\xbf\x4f\x40\xb0\x4e\x40\xaf\x4e\x4e\xae\x4e\x45\xad\x4e\x45\xac\x4e\x4a\xab\xba\x20\x55\xf1\xfe\x2d\x1a\xa1\x4a\xa5\x53\x89\x64\x2a\x91\x4a\x25\x12\xa9\x74\x1a\x95\x4c\xa2\xda\x51\xa8\x96\x04\xaa\x35\x7d\x6a\x4d\x9e\x5a\x53\xa7\xd6\xc4\xa9\x35\x6d\xea\x86\x34\x75\x40\x99\x92\x09\x53\x1a\x5d\xea\x88\x2c\x9d\x88\x2a\x9d\x86\x28\x35\xa0\x49\xd1\x4b\xd9\x31\x92\xb4\x24\x43\x51\x46\x47\x29\xd2\x06\x15\x8a\x32\xdc\x4b\x90\x36\xcf\x02\xa0\x2c\xf6\xd0\xa3\x5a\x1a\x14\xb9\x9a\xaf\xc9\xd1\x7e\x12\x14\xe9\x9c\x95\xd4\xe8\xc4\x14\xe8\x28\x01\x4a\x8a\xde\x0d\xd1\x9f\x1d\x9a\x13\xb9\x1b\x69\x93\x9f\x7a\x92\x13\xb7\x15\x1d\xa3\x3e\xf9\xad\xd0\
x36\x38\x5a\xe2\xd3\x86\x55\x1c\xa0\x3d\x3b\xf4\x26\xce\x70\x8b\xf4\xd4\x92\x9b\xf4\x67\x35\xeb\xa5\x36\x71\xad\xac\xb9\x57\xb7\xc4\xe6\x34\xb4\xa6\x35\xa9\x69\x4d\x69\xda\x12\x9a\x04\x3a\x93\x8c\x36\xda\x51\x99\x36\x44\xa6\x1b\x37\xb8\x05\x89\x79\x54\x27\xb8\x25\x85\x69\x40\x60\xa2\xc5\x2e\x06\xe9\xcb\xa6\x37\x1b\x37\x0d\x0f\x93\x97\x05\x49\x89\xfb\xd4\x8f\x50\x97\x92\xa2\x44\x7b\x97\x27\x21\x2e\x27\xa0\x2d\x4d\x48\x4b\xb1\x8c\xc0\xb9\x6d\x9d\x53\x96\xe3\x84\x65\xfd\xcc\x70\x2e\xbb\x3e\xba\xb2\xe9\xd2\x27\x0c\x52\x8d\xfb\x5f\x75\xe9\x5b\xf8\xc1\xdb\x54\x65\xd1\x25\xb8\xfb\x1e\x26\x2a\x1b\x84\x24\xfa\xe4\x98\x8e\xa6\xb4\x0e\x55\x0c\x3b\xec\x69\xd1\xf5\x16\x45\xa9\xf7\xb1\x53\x26\x28\xe7\x4a\x93\x53\xa8\x4c\x76\x90\x44\x87\x24\xe4\x34\x14\xa4\x43\x02\x12\xef\xf4\x24\x91\x8f\x54\xea\x91\x48\x3c\xda\xd1\x8e\x56\xa4\x23\x8d\x72\xa4\x11\x8e\x44\xba\x91\x4a\x36\xd2\xa8\x46\x32\xd1\x68\x45\x33\xda\x91\x8c\xb6\x14\xa3\x2d\xc1\x68\x4b\x2f\xda\x92\x8b\xb6\xd4\xa2\x0b\x62\x91\x48\x2b\xe6\xea\xde\xe1\x22\x88\x1f\x0f\xc1\x23\x33\x99\xe3\x90\x9b\xa5\xd6\x26\x69\xa5\xbf\xb0\x81\x21\x75\xdf\x21\x31\xbe\xea\xdc\xcc\x03\xcb\xd5\xd3\xf2\xe3\xb3\xfb\x21\x0e\xf3\xe5\x13\x4b\xd3\x70\x73\x84\xfb\xfe\xd4\xb8\x1a\xf6\x66\xf9\xd2\x90\xb7\x96\x2f\x6e\xc4\x5a\xbb\xf0\x58\xc9\xc5\x88\xb8\x75\x43\x6f\x8f\xb2\x2c\x12\xe7\xe4\x62\xe9\x58\x28\x72\x6a\x88\xab\x08\x24\x20\x31\x6f\x2b\xff\x7e\xbf\xc8\xaa\xe2\xd8\x68\xb0\x16\x10\x41\x2c\x68\x3e\xd9\xb8\xe2\x7c\x23\xbf\x11\x49\xea\x18\x99\x0d\xb3\xda\x5d\x1b\xc8\xbf\x7f\xbb\xe3\xd7\xb9\x33\xc7\x51\x53\xf6\xf0\xe5\xe9\xc7\xb7\x29\xcb\xf2\xb7\x4f\x3f\xbe\x2d\x41\xd0\x53\xb9\xe8\x3c\x2b\xba\x39\x31\xa5\x55\x0b\x67\x4e\x0c\x77\x97\x2d\x03\x03\xa7\x0a\x72\xf6\x2d\x1f\x1a\x61\x86\xc7\xce\xdb\x6f\x12\xfb\x66\xf4\x6b\x03\x97\xc2\x88\xa9\xd6\x9b\x97\x06\xcf\xc5\x6b\xc0\x57\xeb\xbb\x20\xfd\xba\x49\x9e\x63\xd8\x84\xd9\x3e\x0a\x64\x08\x80\x7d\xcb\x0f\xc1\xf0\x49\x5b\x3e\x86\x47\xf4\x8f\xe4\x2d\xac\x93\xf8\x21\x0a\xd7\xf9\xa0\x1b\xea\x04\xbe\xbd\x7c\xd8\x06\xe9\xd0\
x6d\x9c\x40\x56\xba\x4e\x06\x7f\x75\x1f\x05\xf1\xd7\xc1\x5f\x44\xc9\x63\x76\x1d\xc4\x6c\xe8\x0e\x4d\xcf\xa9\x17\xfd\x30\x36\x03\x21\x66\x9f\xf6\xe9\x4c\x79\x81\x91\x4a\x9d\x09\x87\x3f\xc3\xb2\x71\x85\x2b\xa1\x78\xc2\x63\x4b\xe7\x82\xc5\x9b\xec\x86\x7e\x32\x99\x41\xb0\xc3\x5f\x1a\x35\x70\x85\x69\x48\xd7\x76\x6f\x85\x59\xd1\x07\x39\xea\x10\x4f\x79\x70\xe7\xa7\xe5\xcd\xe5\x7c\xd5\x62\x9d\xe6\x37\xff\x38\x5b\xfe\x72\x35\x83\x9b\xf9\x2f\x63\xc7\x35\x4c\x76\x2b\x27\x9a\xcb\x8c\x56\x29\x5b\x31\xfa\xc3\x9b\xf9\x2f\xfd\xb3\x61\x98\x47\x83\x1f\x5c\xc3\x91\xd1\x59\x58\xe4\x83\x7d\xac\x3c\xb7\xa5\xf8\xdf\x20\x92\x9f\x24\xd1\xa2\xd2\x73\xf0\x2c\xc6\x44\xff\x08\x2f\xe6\x19\xf7\xf3\xf9\xaf\xa3\x13\x18\x66\x4c\x8f\x5b\x6b\xcd\xea\x6b\xfe\xf3\x56\xd8\x6f\x34\x7d\x1f\x6a\x86\x36\x9b\x7d\xf3\xe1\x6f\x91\xab\xe9\xd9\x24\x07\xe7\x89\xb8\xff\xdb\xe1\x90\xb8\xf1\x3c\xcf\xfb\x57\x8e\xbb\xd1\x80\x0a\x82\x43\x6b\x4f\xf6\x8d\xab\x38\x9c\x21\x8b\xd1\x84\xb0\xae\x8c\xae\x33\xf9\xe1\x80\x44\x56\x15\x5e\x62\xfe\xae\x89\x3e\x19\x5b\xa9\x63\x80\xaf\x5d\x72\x8c\xb5\xfc\x65\x62\xb4\x2e\xb6\xec\x60\x7e\x15\xc3\xd0\x4c\x97\x4a\x90\x6f\x33\xef\x0e\x8d\xd4\xc1\xe5\xf2\x6c\x81\x57\x39\xf8\xb0\xbc\x58\xde\xcc\xe0\xd7\x2f\x37\xf3\xcf\x33\xb8\x5d\xcd\x57\xb7\x26\xe7\x06\x4d\x5d\x67\x27\x9d\x66\x19\x55\x12\xad\x32\xfa\xa5\x68\xb8\xd1\x2f\xc5\xbd\x0d\xfe\xb2\x9c\x17\xcc\x5e\x64\x93\x03\x4f\x6d\xa1\x90\xf1\x89\x07\x86\x35\x7c\xc4\x46\xb1\x5a\xb1\x49\x1c\x22\xcc\xcc\x5e\x39\x52\xd8\x99\x12\x6a\x36\x0c\x2f\xa3\xb8\x5c\xc2\x6b\x67\xd8\xdb\x98\xcc\xcd\x27\x6d\x17\xcb\x68\x0d\x1c\xd8\x1a\xb1\x47\x16\x6f\x56\x6c\xb7\x8f\x82\xdc\x68\xce\x23\x84\xb9\x5b\x83\xf2\x42\xb9\xa6\x91\x05\x75\x66\x9a\x8b\x30\x2c\xaf\x5c\x6e\x20\xf9\x97\x20\x0e\x76\xfc\xcf\xbb\xda\x35\x64\x1a\x3d\xe8\x30\x48\x62\xd0\x17\x07\xa4\xc2\x0c\xb2\x6d\x72\x88\x36\x22\x78\x63\x68\xb1\x68\x96\x08\x3f\x0a\x32\x76\x9f\x44\x45\xbc\xaf\x58\x1f\x8a\x78\xcf\xdd\xbf\xfd\x4f\x14\xdc\xb3\xe8\x0b\xef\x98\xff\xbd\x33\x0d\xa2\x36\x24\x17\x52\x96\x25\xd1\x13\x2b\xf9\
x4f\x61\xed\xcd\x9b\x4c\xce\xd1\xa7\x60\x32\xb3\xee\xc2\x78\x4e\x01\x9b\x6d\x87\xc1\x65\xe7\xba\xf8\xa1\xa0\x2e\xc0\xa3\xe4\x99\xa5\x70\x9f\x1c\x64\xb8\x13\x11\xa2\x2b\xa2\xce\xfc\xbd\x64\xf1\xfa\xa5\x58\x59\x84\x59\x35\x12\x66\xe2\x54\x18\xe3\x6d\x35\x3e\x11\x75\xff\x52\x84\x9c\xaa\xf3\x8c\xbb\x30\x0e\x77\x87\x5d\x43\x66\x57\xc6\xa8\xcc\x49\xbf\x43\xc6\x64\xcc\xbc\xb9\xbc\x96\x3e\xcc\x9f\x92\x14\xd8\xb7\x80\x37\x73\x06\x21\x26\x36\x5b\x66\x04\xdf\x1f\xee\xa3\x30\xdb\x32\xde\x7b\x6b\x06\xec\x89\x9b\xfd\xe1\x1d\x6f\xf6\x21\x67\x02\xe7\x36\x35\x79\xb7\x0b\xe3\x2f\x1a\xc4\xbc\x7e\x93\x4a\xc8\xf8\x07\x53\x57\x79\xd1\x0a\x91\x15\xe2\x59\x98\x89\x93\x1c\x76\xc1\x57\xde\x0f\x71\xc6\x6a\xff\x6d\x10\x9b\x3e\x22\xd1\x42\x79\xff\x41\x2e\x4e\x46\xca\xab\x54\x38\xbb\xd1\xf9\xaf\x7d\x94\xe4\x2b\xfe\x36\x1c\xe5\xbd\xb9\x2e\xae\x86\xab\x8b\xff\x8a\x95\xd7\x31\x3e\xd7\xa3\x73\x69\x54\xaf\x50\xf5\xe0\x79\x67\x99\xbf\x43\x05\x88\x28\x97\x1b\xdd\x25\xe8\xf5\xc5\x72\xf5\x65\xf5\xf9\x9a\xb0\x0e\x05\xb8\x38\xbf\x5a\x88\x15\xe8\x87\x7f\x2c\xce\xbe\xcc\x6f\x16\xf3\xfa\xff\xbd\x9f\xdf\xcc\xe0\xe3\x62\xbe\xba\x9c\x5f\x9b\x61\x2d\xe6\xb1\xdd\x13\x7d\xb3\x0d\xeb\xf2\x56\x1b\xfe\xb4\x79\x6b\xc8\x2a\xef\xe7\x66\x61\xd8\x93\xb2\x93\x4c\x76\x4b\xea\x4a\x06\xf3\xb6\x20\xb4\x54\x86\x03\x45\x53\xbf\x34\x94\xcb\xf5\xac\xbe\x8b\x43\xc3\xfc\x53\x24\x26\x78\xf1\x11\x31\x3f\x1a\xd2\x04\x4b\x1f\xd2\x64\x27\xde\xa3\xdb\x3c\x58\x7f\xdd\xa4\xe1\x13\x4b\x0b\x55\xcc\x0c\xe6\xd7\xe7\x86\x32\x96\x68\xc9\x94\x9c\x28\x7a\x49\x92\xd0\x19\x78\xf4\x48\x51\x53\x77\xcf\x9f\xdc\x04\x65\x4c\xc8\xba\xb0\x0f\xd2\x60\xc7\x72\x96\x66\x6a\xa8\x12\x11\xe6\xc5\x9e\x69\x36\xdf\xa8\x94\xe5\xa4\x50\x96\x33\xae\x40\x11\xe2\x81\x26\x19\x42\x39\x56\x8c\x1c\x58\xa0\x91\xe2\x21\x26\x38\x9b\x6e\x6c\x51\x5b\xa4\x0c\xb5\xf7\x2f\xb0\x61\x0f\xc1\x21\xca\x67\x85\xbe\xd2\x33\x25\x63\x53\x73\xf2\x51\x50\xe4\x4f\x42\xcb\xb3\x3c\xce\x8e\xb4\xda\x10\xb6\xd8\xf1\x9d\xda\xbe\x95\x1b\x05\x9f\x9f\xa3\x4e\x06\xf6\x14\xb2\xe7\x6a\x73\x36\xe6\xaf\xee\x16\
xda\x28\x06\x3b\x51\x29\x59\xdc\xa4\x30\xb2\x15\x98\x92\xc5\x40\x66\x0a\x6d\xb3\xe6\x60\xb2\x41\xb1\x29\x82\xe1\xa6\x3c\x95\x46\x72\x8a\x60\xf1\x38\x22\x55\xe5\xb5\x06\xa5\xaa\x08\x16\x2b\x71\xab\x61\xc1\x2a\x82\xe5\x52\xf2\x68\x4c\xb6\x8a\x60\x5a\x12\x78\x43\xe2\x55\x04\xa3\x2d\xb9\x2b\x8d\x84\x15\xc1\x68\xbf\xe8\x95\x98\x13\x09\x16\x9d\x4a\x5f\xc9\x32\x2e\x80\x45\x19\xaa\xd9\x14\x32\x58\x3a\xdb\x5d\x31\x2c\x8a\x49\x5a\x32\x3c\x3b\x11\x2d\x59\x5c\xce\xe6\x56\x82\x5a\x2d\x8b\x53\x2e\x57\xac\xe5\xb4\x64\x31\x12\xd5\xa2\x7c\x25\x92\x11\x69\x2d\xea\xdc\x1d\xf4\x0b\x6c\x91\x3e\x66\xb5\x24\x97\x46\x66\x8b\x68\xb1\x2d\xcc\xa5\x8a\x6d\x11\x8c\xf6\xcb\x73\x15\x5e\x1d\xda\x57\x77\x40\xa4\x8b\x94\x09\x0d\x1c\x4b\x75\xc9\x32\x4d\x2e\xb4\x51\xd9\x2e\xe2\xb3\xd7\xe5\x16\x2b\x09\x1e\x8a\x49\xad\xdc\x57\x2d\xe1\x45\x59\x56\x0c\x8a\x7e\xa9\x1d\x41\x19\xab\x3d\xd2\x5f\x8d\x91\x46\x59\xce\x0e\x0a\x80\x49\x51\x2f\x82\x59\xf7\x32\x60\xb2\xb8\x17\x03\x93\xc5\x39\xae\x54\x34\x77\x02\x68\x49\x96\x0e\x47\xa4\xca\x83\xd1\x1e\x99\x22\x28\xa6\x17\x09\xa3\x36\xb5\x2b\x2b\xd6\x60\x99\x28\x2f\xdb\xa0\xb8\x58\xe9\x93\x27\x18\x76\x2f\x31\xa6\xd8\x75\x29\x34\xa6\x18\xee\x95\x1b\xb3\x34\x59\x08\x94\xe9\x45\xc7\xe8\xb6\x55\x99\x32\xbd\xf4\x98\x13\xeb\x7f\x7d\xa7\xb3\xfe\xce\xc6\xba\x46\xb2\xcc\x89\x0c\x99\x2c\x94\xac\x84\x74\x49\xb2\x56\x6d\x42\xb6\x3c\xb2\x3c\x59\xbb\x32\x3a\xc5\x1f\x59\xaa\xac\x5d\x99\x92\x71\xcf\x42\xb6\x4c\x57\x9d\x92\x2d\xcf\x5a\xc2\xac\x65\xc4\x26\x73\x9f\x03\x39\xb3\x01\x33\xd8\xbc\x79\x0e\xa4\xcd\xfa\xcd\xa0\xb3\xf8\xb9\x92\x39\x6b\xd9\xb2\x4d\x0a\x48\x96\x3c\x93\x85\x26\x7c\x26\x8b\x23\xf9\x33\x59\x46\x45\xd0\x08\x33\x7d\x29\x9b\x36\x24\x85\x66\xb9\x1e\x3c\xd5\x0b\xa2\x91\x36\x5c\x2d\x09\x35\x2b\x3d\x22\x59\xfa\x84\xd4\x2a\x71\x34\xca\x9a\xcd\xb1\x9c\x9a\x2c\x63\xa2\x6a\x54\x57\xfc\x90\xb4\x1a\xcd\xd5\xd0\x14\x63\xeb\x13\x58\xb3\xdb\xca\x4a\x87\x48\x47\x66\x8d\xd2\x5c\xf7\xc2\x6c\xb2\x8c\xca\xb3\x59\x6c\x62\x87\x44\xda\x28\xe3\xb5\x25\xeb\xd6\x96\x6a\xa3\xb4\xb4\x47\xdc\xad\
x1a\xb9\x14\x4f\xfe\x80\xc4\x1b\x51\x39\x4d\x16\xd7\x42\x6f\xb2\x8c\xcb\xbd\x59\x44\xe5\x36\x7d\xa2\x6f\x04\x9b\xea\x04\xa0\x91\x7e\x23\x7a\xb2\xda\x9a\x95\x2d\x01\x38\x92\xe7\xa9\x5f\x32\x0e\xce\x71\x39\x11\x64\x99\x40\x38\x4e\x16\x6b\xf9\x38\x59\xac\x45\xe4\x9a\x66\xe8\x52\x72\xb2\x10\x04\xe5\x64\xb1\x8a\xaf\xd8\x8a\xcb\xc9\x62\x23\x31\x27\x8b\xcb\x20\x91\x85\xdc\x5c\xcb\xde\x94\x21\x22\x4b\xb1\x39\x59\x0c\x24\xe7\x28\x73\x96\xcc\x52\xdb\x27\x3c\x67\x33\xb7\x74\xd2\xb7\x94\xf2\x73\xd4\x08\x7c\x53\xb0\xae\x2b\x42\x47\xb2\xda\x95\xad\x53\xa4\xe8\x28\xeb\x60\x87\xe2\x75\xb2\x4c\x20\x61\x27\xcb\x98\x90\x1d\x25\xa4\x5f\x4a\x09\xf7\xc8\xd9\x11\x4c\x2a\x02\x78\x5d\x51\x3b\xca\x23\xea\x95\xc1\x6b\x84\xcb\x28\x7e\xeb\x5e\x31\x3c\x92\x1c\xab\x2c\x2e\x25\xf1\x64\x19\x15\xc6\x23\x2e\x53\x46\xe4\xf1\x08\x56\x6b\x41\x3d\xad\x48\x9e\xc3\xc0\x60\x57\x2a\x8f\xbc\xab\x68\x04\xc3\x34\x82\x79\xb4\xf7\xbf\x37\x72\xd5\x88\x43\xd1\xce\x09\x19\x88\xb6\xd3\x56\x6d\x43\x29\x6d\xe0\x9e\xd4\xbd\x13\x88\xee\xc9\x32\x8d\xf4\x9e\x2c\x0e\x05\xf8\x64\xa1\xba\xfb\x49\x62\x7c\x6a\x65\xac\x24\x9f\x5a\x1b\x29\xcc\xa7\x56\xa6\xc9\xf3\xa9\x36\x48\x22\x7d\xaa\x09\x5a\xc0\x82\x22\xd8\xd7\xaa\x4b\x0b\xb3\xd0\xc4\xfb\xd4\xda\xb4\x48\x09\x51\xc8\x4f\xad\x6e\x15\xe8\xb0\x11\xf5\x53\x6d\xd8\x86\x39\xec\x04\xfe\xfa\xad\x50\xc2\x0a\x76\x62\x7f\xbd\x56\x48\x21\x0e\x7b\xe1\x3f\x6d\x7b\x4c\xe5\xff\xea\xf2\x80\xe4\x7b\xc0\x89\xc2\x7b\x21\xe4\x84\x03\x4e\x60\x58\xf0\xe0\xb7\xfa\x88\x36\xb2\x1b\x65\x1f\x98\x6b\xca\x3d\xfd\xf8\x56\x56\x31\x14\x8d\xab\x8b\x3c\xb7\xbe\x61\x71\x2e\x8f\xc5\x0b\xb6\x58\xac\xec\xca\x8c\x16\xa5\xeb\x0c\xed\x31\xe5\x1f\xf2\x52\xdd\x8e\xaf\x0b\x04\x7a\x86\xf9\xc4\xee\xc3\xf5\xd7\xf6\x46\xfe\x75\x10\x9d\x6b\x4d\x4b\x68\xf6\x9c\x7b\x36\xac\x9b\xa6\x0e\xe0\x20\xfe\xca\xf7\xa0\xf7\x81\x38\xa3\x4a\xf6\xc0\xca\xd1\x78\x2c\xf0\x65\x13\xa6\x6c\x8d\x07\xb8\xc0\xb1\xff\xeb\xac\x6c\xc6\x77\xe4\xf8\xd2\x0d\x8f\xaa\x9d\x0e\xce\x46\x7f\x94\x2e\xab\x43\xc6\x0a\xc8\x4b\x8c\x1f\xda\xc6\x2a\x63\x11\x5b\xe7\x9d\
x94\x4a\xb0\x0f\x32\x5a\x1c\x30\x4d\x0e\x8f\x5b\xa9\x91\x21\xc7\xa3\x8b\x23\x66\x67\xe7\x37\x0b\xb9\x02\x51\xf4\xf0\x56\xcb\xeb\x19\xbc\x5f\xae\x56\xcb\xcb\xe3\x9d\x1a\xd2\xb6\x85\x60\x67\xb5\x34\xa1\xae\xdb\xb5\xe4\xdd\x22\x2b\xc6\x87\x5d\x3d\x1e\xa9\xef\x6b\x18\xe7\xec\x91\xb0\x7f\x2f\x84\x20\x79\xfd\xbf\xfd\xc5\xf6\x75\xbf\x6a\xde\x89\xdd\x6b\xc4\xdf\xa2\x5d\x10\xbf\xd8\x46\xbe\x85\x93\x4b\xbe\x91\xfc\xa5\xd1\xbd\x02\x48\xb3\xc5\x0b\x7d\xc9\xf2\xed\x6b\x13\x85\x37\xcd\xa6\x7c\xe7\x53\xac\xd2\x56\x07\xd3\xec\x5d\xf1\x1c\xbe\xec\x84\x45\x74\x82\x63\x59\xda\xa1\x7b\x35\x28\x20\x5d\x99\x2c\xa6\x1d\x39\x10\xd9\x0a\x13\x91\x50\xad\xc6\x47\x6a\x79\x21\x82\x45\x95\x94\xd9\x07\x05\x3c\x68\xfd\x8a\x14\xe0\x5d\xd3\xc9\xe8\xe0\xb3\x70\xb9\x58\x7d\x5c\x9e\xb5\x34\x52\xe5\xbf\x89\x03\xc8\x74\x93\xe2\xd0\x71\xf9\xf7\xf9\x55\xf5\xf7\xed\xa7\x4b\x0b\xab\x17\xf3\xd5\xe2\x76\x75\xbc\x4f\x55\xb7\x7f\xe8\x46\x88\x0e\x98\xba\x3f\x2d\x2a\x93\x7c\x4e\xf5\x13\xa3\x57\x96\x0f\x0c\x51\x3f\xd3\x64\xda\x7a\x9d\xed\x93\x2e\xe7\xd7\x77\xb2\x7d\xb2\x6e\x9a\x1a\x09\xde\xef\xa3\x17\x08\x8a\xae\x6f\x9e\x92\xc3\xee\x9d\x1f\x72\x96\xc2\x5d\xc3\xc0\x5d\xe3\xec\x95\x17\x14\xf0\x82\x02\x78\x8b\x5e\x50\x40\x5f\xbc\xa0\x80\x17\x14\xf0\x82\x02\x06\xe5\x0f\x23\x28\xa0\x5b\x14\x78\x65\x01\xaf\x2c\xe0\x95\x05\xbc\xb2\x00\xde\xa4\x57\x16\xa8\x8b\x57\x16\x90\xcd\xf5\xca\x02\x5e\x59\x40\xb1\xeb\x95\x05\xbc\xb2\xc0\x70\xf1\xca\x02\x84\xca\x5e\x59\xc0\x2b\x0b\x8c\x9a\xf1\xca\x02\x4a\xf1\xca\x02\xfd\xc5\x2b\x0b\x78\x65\x01\xaf\x2c\xe0\x95\x05\xbc\xb2\x80\x57\x16\xf0\xca\x02\x5e\x59\x60\xb0\x78\x65\x81\x3f\x80\xb2\x80\x2e\x56\xe4\x25\x06\xbc\xc4\x80\x97\x18\xf0\x12\x03\x5e\x62\x00\x57\xbc\xc4\x00\x78\x89\x81\xaa\x78\x89\x01\x51\xbc\xc4\x00\xd5\x86\x97\x18\xa0\xd4\xf6\x12\x03\x5e\x62\x60\xd4\xca\xef\x57\x62\xa0\x9d\x48\xf4\x86\x2f\xff\x5e\x37\x9b\xa8\x68\x02\xde\x98\xf3\xad\x2c\xae\x1d\xca\xfe\xf4\x5a\x93\x50\x34\x00\xb1\xb4\x86\x7b\x96\x3f\x33\x94\x57\x2c\x7f\x4e\xd4\x6c\x8c\x52\xdb\xc0\xfc\xdb\x49\x43\x1e\x36\x2c\
x4e\x76\x61\x1c\xe4\xc9\x2b\xc9\x0c\x9c\xd5\x0d\xa0\x99\x99\x66\x48\x50\x9b\xd5\x09\xc0\x35\x3a\xb8\x5c\x88\x8b\x21\x82\x5b\x14\xe1\x33\xc9\x02\x25\x9b\x2c\xd8\xa1\x33\x54\xe6\x0a\x6c\xc6\x13\xb8\xcb\x2e\x0b\x47\x1d\x57\x36\xad\x9c\x22\xeb\x2c\x98\x64\x9e\xa5\xf8\x4b\x84\x87\x7b\x24\xfb\x2c\xd1\x0f\xe3\x2c\x03\x2d\x58\x8d\x7c\x70\x01\x8e\x81\xbd\x17\xdc\x39\x40\x06\x93\x41\x64\x30\x06\x92\x51\x6d\x16\xf8\x59\x0f\x4c\x46\xb4\x5a\x79\x8d\x8f\x09\x94\xc1\x24\x50\x19\x8c\x80\x65\x44\x93\x1a\x38\xcb\xfa\x54\x3e\x48\xcf\x90\x8a\xa4\x35\x00\x33\xa2\xc9\x1e\x2c\x4d\x40\x66\xee\xee\xde\x0a\x4d\x83\x49\xf0\x34\x18\x43\xd4\xa8\x83\x34\x1b\xc0\xd4\x88\x36\xcb\x6e\xd4\xa1\x6a\x96\x26\xfb\x3b\x96\x3e\x3b\xc4\xae\xa2\xb9\x6e\xb0\x35\x98\xe0\x5b\xe2\x00\x5f\x83\xd7\x5a\x60\x39\x22\xd9\x60\x42\x9a\x0d\x26\x22\xda\x60\x1a\xaa\x0d\x46\xc8\x36\x72\x4b\xfb\xe9\x36\xea\x8c\x57\x30\x71\x1a\xc2\xcd\x6a\x22\x29\xb9\x38\x85\x72\xa3\xf6\xa4\x8e\x8d\x2b\x49\x37\xa2\xcd\x5e\x3e\x8e\x76\x62\x11\x8a\x60\xb2\x73\x46\x0e\x86\x39\x39\x6a\x53\xe3\x4d\x5f\x24\x8c\xda\x9f\xda\xd8\x5f\xc5\xcb\x11\xad\x56\x94\x9d\x86\x99\xa3\xde\x7a\x1f\x69\x47\x8b\x27\xf2\x32\x40\xdb\x39\x19\xf1\x6d\xe2\x8e\x68\xb4\xe6\xf4\x26\xa3\xee\x60\x94\xbc\x23\x6f\x5c\x5a\xc1\x4e\xa7\xf4\x1d\x4c\x4a\xe0\xc1\x30\x85\x47\xb4\x18\x27\xf9\x74\x24\x1e\x4c\x45\xe3\xc1\x28\x91\x47\x34\x5a\x70\x7c\xfd\x54\x1e\xd1\x6e\x83\xe5\xeb\x23\xf3\xec\x2c\x17\x3c\x9f\x4a\xe7\xd9\x99\x6c\x30\x7d\x3a\x42\xcf\xce\xb8\xca\xf5\xe9\x29\x3d\xbb\x2b\xa8\x6c\x9f\x9e\xd4\x73\x76\x85\x77\x7f\xed\x12\x8b\x35\xad\x67\x39\x68\xec\x19\x3f\x20\xc7\xfb\xc1\x9a\xf5\x03\x6b\xde\x0f\x6c\x99\x3f\xb0\xe5\xfe\xc0\x96\xfd\x03\x07\xfc\x1f\xd8\x33\x80\x5d\x13\xb4\x58\xb4\xc6\x0e\x2d\x34\x0e\xee\x78\x40\x70\xc7\x04\xf6\x9a\xc2\x47\xa7\xfb\x4c\x11\xc2\xe5\x7d\xa6\x08\x31\x73\x70\xcc\x08\x82\x4b\x4e\x10\xec\x59\x41\xb0\xe4\x05\xc1\x35\x33\x08\xa3\xdc\x20\xdd\x9b\x31\x11\x3b\x08\x93\xf1\x83\x30\xc6\x10\x52\x57\x84\x29\xeb\xe5\x08\xa9\x3e\xe7\x82\x3e\xec\x65\x09\x6d\xfd\x06\xa7\
x5a\x9e\xd0\x7e\xbf\xa7\x61\x0a\x6d\x96\xc4\xf5\x81\xec\x16\x57\x68\xe3\x21\xef\xd2\x88\xd4\x90\x2a\xd4\x7b\x3b\x67\x44\x22\x4c\x49\x25\xc2\x08\x99\xe8\x62\xc3\xdf\xa5\x13\xe9\x73\x8c\xbc\xd5\x7e\x42\x91\x6a\x59\xa6\x02\xe9\xa3\x14\xa9\xee\x9e\x92\xca\xed\x23\x15\x2d\xf6\xca\xf5\xe1\x61\x1d\xad\x68\x31\x71\x35\xa8\xbc\x01\x62\x91\x3e\xd4\xd4\xc6\x37\xa8\x45\x07\x73\x8d\x9e\x5c\xb4\x18\x13\x95\xa6\xa1\x0b\xb4\x1c\x1a\xc7\x66\xca\xaf\x42\x8b\x60\x74\xf2\xba\xb9\xf7\x24\x35\xd9\x47\x6b\x81\x47\xa0\x8b\x3c\xca\xe2\x88\x81\x04\x67\x1c\x24\xd8\xb0\x90\xe0\x22\x7a\xe7\x86\x89\x04\x27\x5c\x24\x4c\x10\x8e\xb4\xe6\x23\xe1\xb5\x82\x91\x4e\x50\x49\x98\x0c\x97\x84\x11\x64\x92\xee\x34\xef\xc5\x26\xa9\xef\x48\x01\x5b\xea\xd1\x49\x07\xc7\x2e\x66\x2d\x7c\x92\x1c\xe3\x69\x43\x97\x56\xf4\x29\x48\x0f\xb3\x86\xe6\x11\xf1\x63\xaa\x33\xdc\x3d\x7c\x09\xd3\x00\x98\x30\x0c\x61\x12\x2d\x0a\x91\xca\x3e\x10\x93\xba\x46\x7f\x28\xa5\xba\x7b\x60\x4c\xfb\xd0\x6e\x07\xc8\xfc\x63\x84\x76\x1d\x4b\xa0\x42\x7b\xc5\x54\x1d\x9c\x6c\x00\x9a\x16\xa7\x24\xda\x61\xd8\x4e\x68\xda\x6e\x80\xf5\x06\x53\xa9\xcf\xcc\x31\xde\x09\x23\x88\x27\x79\x6e\xf9\xbf\x12\x2b\x55\x71\x4f\x7a\x0f\x2b\x47\xdb\xf4\xc8\xa7\xab\x26\x4f\x15\x26\x3d\x7a\x00\xc9\x02\x1a\x05\x6b\x70\x14\x6c\xe1\x51\x70\x04\x90\x82\x1b\x88\x14\x2c\x41\x52\xb0\x84\x49\xc1\x16\x28\x05\x6b\xa8\x14\x2c\xc1\x52\xb0\x87\x4b\xc1\x0d\x60\xda\x35\x63\x13\x49\x73\x01\x9a\x82\x33\xd8\xb4\xcf\x12\x35\x60\xe5\x02\x3a\xed\xb1\x44\x0e\xa2\xb9\x82\x4f\x35\xed\xc2\xe7\xb8\x06\x62\x9e\x6b\x70\xac\xab\x44\x48\x17\x0c\x53\xe5\xbc\x86\xa3\xe6\xbd\x06\x93\xdc\xd7\x04\x9b\x75\xb6\x6c\xfb\xfc\xd7\xf1\x61\xc7\xb7\x36\xaf\x45\xa3\x5e\x95\x97\xa7\x19\x99\xc6\x8b\x44\x6b\x54\x27\xa4\x5b\x75\xad\xe7\x50\x07\xca\x1f\x84\x43\xad\xc6\x94\xa7\x50\x3d\x85\xda\x2e\x9e\x42\xf5\x14\xaa\x52\x3c\x85\xea\x29\x54\x4f\xa1\x7a\x0a\x95\xd2\xaa\x3f\x24\x85\xaa\x5b\x5e\x79\x06\xd5\x33\xa8\x9e\x41\xfd\xa3\x05\x2a\x3d\x83\xea\x19\x54\xcf\xa0\xd6\xcd\xf6\x0c\xaa\xa6\xc9\x9e\x41\xf5\x0c\xaa\x6a\xdc\x33\
xa8\xc6\x97\xf1\x0c\xaa\x67\x50\x3b\x36\x3c\x83\x3a\x68\xca\x33\xa8\xe6\xa6\x3c\x83\xaa\x2d\x9e\x41\xf5\x0c\xaa\x67\x50\x3d\x83\xea\x19\x54\xcf\xa0\xca\xe2\x19\xd4\x46\xf1\x0c\xaa\x67\x50\x3d\x83\xea\x19\x54\x84\xcd\xe3\x85\x22\x3d\x81\xea\x09\x54\x4f\xa0\x7a\x02\xd5\x13\xa8\xc4\xa6\x7a\x02\xb5\x2c\x9e\x40\xfd\xbf\x11\x29\xf5\x04\xaa\x27\x50\x3d\x81\xda\xae\xef\x09\x54\x4f\xa0\xf6\x5b\xf2\x04\xaa\x99\x25\x4f\xa0\x7a\x02\xb5\x2a\xdf\x3b\x81\xba\x0f\xd7\x5f\xdb\x8e\x94\xd7\x81\x51\xaf\x35\x2d\xa1\xd9\x9b\xc6\xb3\x64\xdd\x3e\x75\x44\x07\xf1\x57\xbe\x49\xbf\x0f\xc4\xd1\x73\xf2\xd2\x5c\x0e\x4d\xdc\x22\x8e\xce\xdc\x6d\xc2\x94\xad\x6d\x48\x53\x37\x53\xca\x59\xd9\x0c\xba\xa1\xe3\x8d\x91\xaa\xb1\x24\xb7\xa3\xca\x3e\x7c\x94\x6e\xc2\x43\xc6\x0a\xe8\x54\x0c\x22\xca\xb4\x94\x40\xc6\x22\xb6\xce\xbb\x51\xe2\x7d\x90\x51\xf6\x87\xf9\x36\x4d\x0e\x8f\x5b\xd1\xae\x62\x50\x56\xc7\x2f\xcb\x83\x94\x04\xb3\x67\xe7\x37\x0b\xb9\x7e\xf9\x74\x75\x7b\xbd\xf8\x70\xfe\xd3\xf9\xe2\x6c\x06\xab\xe5\xf5\x0c\xde\x2f\x57\xab\xe5\x25\xfe\xfc\x1b\x6d\xf3\x72\xa2\x6f\x0b\xc1\xce\x6a\x79\x4d\xa8\x25\xef\x16\x59\x31\x3e\xec\xea\xf1\x48\x7d\x69\xc3\x38\x67\x8f\x84\x4f\xf8\x43\x92\xee\x82\x5c\xd4\xff\xdb\x5f\x6c\xdf\xf9\xab\xe6\x9d\xd8\xbd\x46\xfc\x2d\xda\x05\xf1\x8b\xe5\x21\x93\x3c\x81\x20\x8a\xe4\x1b\xc9\x5f\x1a\xdd\x2b\x80\x34\x5b\xbc\xd0\x97\x2c\xdf\x52\x68\x66\x97\x73\xec\x4d\xb3\x29\xbf\x87\x79\x56\x69\xb0\x83\xb9\xf6\xae\x78\x18\x5f\x76\xc2\xe2\x1d\x2d\xce\xda\x3e\xd3\xa1\x46\x62\xa4\xaf\x96\xc5\xb4\xd3\x0d\xe2\xb4\x57\x22\x8e\x62\xd4\x5c\xd8\xf3\x36\x5c\x6f\x05\x67\x4c\xb0\xa8\xa2\x70\xfb\x40\xd2\xc9\xf6\xef\x49\xc1\xe5\x36\xbd\xa6\x0e\xbe\x0d\x97\x8b\xd5\xc7\xe5\x99\xfa\x61\x28\xfe\x8d\x7a\x3a\xbf\xac\x3e\xff\xb5\x36\x75\x7e\x55\xfd\x4d\x3c\x99\x5f\xd4\xbe\x98\xaf\x16\xb7\xab\xe3\x7d\xaf\xba\xfd\x43\x37\x42\xf2\xed\x9c\x34\xfa\xd3\xa2\x32\xc1\xad\x75\xd2\x78\x62\xf4\xca\xf2\x81\x21\xea\x4b\xac\x3d\x48\x5f\xe6\x54\x51\x16\x57\xbb\xa9\x5b\x4d\x4b\xbe\xa7\xdd\x94\x75\xfb\xd4\x58\xfc\x7e\x1f\xbd\x40\x50\
xf4\xbf\xc5\x89\xcc\xe0\x21\x67\x69\x2d\x17\x04\x61\x26\x26\xc2\x43\x8e\x75\x94\x5b\xe8\xf9\xd8\x6a\x9a\xb8\x5c\x05\xd8\x6a\x99\x4c\xa3\x63\x32\xa8\x61\x52\xa9\x91\x90\xbd\x35\x6d\xfd\x92\xea\x14\x01\xc1\xe2\x31\xb5\x4b\xdc\xeb\x96\x0c\x69\x96\xd8\xc9\x4d\xe8\xf5\x4a\xba\xca\x23\x04\xd3\xb5\x56\x49\x9f\xea\x08\xc1\xe8\xf9\x03\x04\xce\x15\x47\xdc\xab\x8d\x0c\x2a\x8d\xd0\x56\xb0\xfd\x2a\x23\x65\x27\x50\x86\xaa\x46\x61\x44\xab\x15\x62\x6d\xbb\xab\x13\x42\x31\x49\x3b\x29\xe9\x42\x55\xc4\xe5\x6c\xee\x40\x4d\xe4\x15\x96\x05\x8e\x94\x44\xa6\x52\x11\x99\x42\x41\x64\x02\xf5\x90\x21\xe5\x10\xda\xb1\xb8\x5e\xd5\x90\x4a\xff\x83\x60\xb4\xab\x18\xd2\xd2\xfe\xa0\x7d\x83\x2b\xed\x04\xbd\xee\x07\x69\x5a\x14\x43\xb1\x5f\xf3\x83\xec\x51\x72\x7f\x2c\x6c\xe0\x48\x18\x04\xa4\xc3\x76\x3d\x3a\x1f\x36\x47\xc2\xf4\x1a\x1f\xb5\x5a\x07\x65\x91\xd1\xd1\xf7\xe8\x57\xea\xa0\x8c\xd5\x74\x48\xa5\xc3\x4e\x0e\x52\xa3\xd0\xd1\xd0\xda\xa0\xb9\x76\x26\x51\xe7\x18\x56\xe6\xa0\x4f\x81\x13\x9d\x35\x9b\xee\x9c\xd9\x80\x1a\x07\xc4\x09\xf1\x91\x4d\xa3\xc4\x31\x89\x0a\xc7\xb0\x02\x87\x85\x9b\xaf\x57\x7d\xa3\xa9\xa3\x41\xb7\xdb\x56\xde\x68\x69\x68\xd0\x0d\x4b\xd5\x0d\x8d\x7e\x86\xa5\xc9\x42\x71\x43\xaf\x9d\x41\xb7\xad\xaa\x6d\xe8\x75\x33\x9c\x58\xff\xeb\x3b\x9d\xf5\x77\x36\xd6\x35\x2a\x1b\x0e\xf5\x32\xa8\xfe\x57\x1b\x9d\x0c\x3b\x8d\x0c\x2b\x7d\x0c\x2b\x6d\x0c\x2b\x5d\x0c\x5b\x4d\x0c\x4b\x3d\x0c\x17\x5a\x18\x4e\x74\x30\x1c\x69\x60\x38\xd2\xbf\x70\xa4\x7d\xe1\x48\xf7\xc2\x91\xe6\x85\x4b\xbd\x0b\x67\x5a\x17\x96\x3a\x17\x36\x1a\x17\x4e\xf5\x2d\x86\xb5\x2d\x68\xcc\xcc\x44\xba\x16\xd3\x68\x5a\x0c\xea\x59\x10\xc1\xe5\x3e\x2d\x8b\x4a\x95\x82\x18\x9d\xd6\xe9\x58\x34\x14\x29\x08\x56\x75\x1a\x16\xb6\x31\xe4\x8e\x7e\x45\x4b\x89\x82\xe6\x6a\x68\x6a\x57\xf4\xa9\x50\xd8\x6d\x65\xdd\x29\x50\x4c\xa6\x3e\x31\xa4\x3c\x61\xbb\x89\xed\xaa\x4e\xd4\xfa\x11\x94\xf1\xda\xa7\x38\x41\x3f\x4c\xdb\xa7\x36\x61\xa3\xbe\xd2\xa7\x34\xa1\xca\x2e\xd0\x36\xdb\x9d\xe9\xa9\xa5\x17\x41\xe9\xd5\x7e\x85\x89\xe2\xe9\x59\xc4\xe8\x14\x75\x09\xcb\x59\x40\
xaf\x2c\xd1\xd0\x88\x20\x7a\xb2\x5a\x93\x73\x5b\x1f\x82\xe4\x79\x52\x14\x25\xda\xda\x10\x94\x31\xa5\x7d\xdd\xdb\xba\x10\xa4\x0e\x70\xa2\xa1\xee\x48\x0f\xc2\x8d\x16\x04\x59\x07\xc2\x52\x32\xc1\x85\xfe\x83\xbd\xf6\x83\xcb\x90\x91\xb5\xe6\xc3\x2b\x04\x8c\x9c\xe8\x3d\x4c\xa3\xf5\x30\xa4\xf3\x40\x8b\xc2\xf4\x6a\x3c\x54\x6a\x0d\xd4\xe8\x7c\x5b\xdf\x41\x55\x6a\x20\x59\x6d\x6a\x3b\x68\x54\x1a\x28\xab\xe2\xb2\xeb\xf4\x0a\x0d\xe4\x78\xde\x04\xea\x0c\x13\x28\x33\x0c\xa8\x32\x50\xe5\xcc\xfb\x14\x19\xe8\x4a\xee\xfd\x6a\x0c\x8d\xe0\x19\xc5\x8b\xdd\x56\x62\xe8\x04\xcf\x48\x27\x5e\x5a\xe1\x36\x35\x78\x46\xf9\x70\xb7\xc3\x6d\xed\xc0\x19\x71\xd1\xa2\x17\xc7\x9f\x5b\x28\x5d\xd4\xca\x0b\x5a\x0d\x05\x87\x61\xc2\xae\x7e\x02\x79\x8f\xe1\x5c\x3b\x61\x28\x8e\xd5\x88\x4a\xd1\xce\x10\xf5\xaf\xe2\xac\x12\xe2\xf4\xc6\xb1\xca\x8e\xb1\x5e\xc0\x57\x5a\x09\x6d\xd5\x03\x52\xa4\x48\xa7\x93\xa0\x51\x3c\xa0\xec\x92\xab\x10\x96\x23\xb5\x03\xaa\xf3\xdf\x42\xe5\xc0\x4e\xe1\xc0\x4a\xdd\xc0\x85\xb2\x81\x03\x55\x03\x1b\x45\x03\x1b\x35\x03\x2b\x25\x03\x3b\x15\x03\x1b\x05\x03\x4b\xf5\x02\x07\xca\x05\x2e\x54\x0b\xdc\x28\x16\xb8\x51\x2b\x70\xa3\x54\xe0\x46\xa5\xc0\x8d\x42\x81\x3b\x75\x02\x2b\x65\x82\x5c\xdd\xd3\x5d\x04\xf1\xe3\x21\x78\x64\xe6\x33\x2c\x69\x73\xdb\xda\xd4\xae\xf4\x8d\x30\x36\xa7\xee\x0d\x25\x74\x5f\x9d\x00\x7c\x60\xb9\xca\x79\x99\x7e\x73\x0e\x71\x98\x2f\x9f\x58\x9a\x86\x9b\x23\xf7\xc7\xa7\xc6\x95\x69\x9d\xc0\x97\xcd\xbc\xfd\x7c\x81\x27\x16\x64\x85\xf7\x54\x2c\xc5\x10\x23\x4c\x74\x9e\x0a\xc4\x17\xe7\x8e\x63\xe9\x56\x42\x9f\x8c\x16\xad\x12\x47\xd8\x93\xe2\x0e\x21\x88\x5f\xe4\x3f\x8b\xa8\x42\xb0\x16\xa8\x5b\x8c\x73\x49\x96\x0d\x95\x27\xd5\xf9\xed\x4b\xda\x95\x2f\x36\x2a\x0f\x65\x80\xb1\x28\x10\x81\x3b\xde\xae\x3b\x73\x71\x8a\x94\x3d\x7c\x79\xfa\xf1\x6d\xca\xb2\xfc\xed\xd3\x8f\x6f\x4b\x29\x88\x53\xb9\x74\x3f\x2b\x1e\x51\x82\xd3\xae\x28\x9c\x81\x31\xdc\x5d\xb6\xcc\xdc\x0d\x8f\xe5\x7c\x9b\xb2\x6c\x9b\x18\x04\x2f\xcd\x43\x95\xed\x57\xb7\xba\xc4\x48\xbd\xd6\xf0\x2c\x6a\x41\x14\xc6\x7c\xcf\x90\x06\xcf\x31\
x6c\x93\x34\xfc\x17\x1f\xa7\x7c\x87\x2f\x43\x0b\xa3\xbd\xc4\x9f\xd2\x7a\x1b\xa4\xf9\xd8\x5b\x4d\xf2\x87\x52\xbc\x9f\x86\xbe\x4e\x14\x58\x46\x70\xda\x19\x3f\x18\x2c\x1b\xb5\x4e\x22\xd3\xfc\xf8\xe8\xb9\xb0\x7d\x3e\x9e\x5f\x0a\x57\x91\xd2\x41\xe2\x32\xc6\x4e\xc9\xee\xb9\xf5\x2c\x0f\x72\x26\xfb\x45\x78\x80\x84\x07\xbf\x7a\xf7\x4c\x3f\x37\xa2\x15\x65\xfc\x40\x80\xec\x72\xdf\x1c\x40\xd1\x60\x74\xfa\xac\x32\x5d\xd6\x87\xe5\xc5\xf2\x46\xc5\x72\x7f\xbe\x59\x7c\x9e\xc1\xfb\x8b\x4f\x0b\xf1\xf7\xc2\xf8\xc0\xde\xe7\xc5\xc5\xc5\xf2\x97\x19\x2c\x6f\xf8\x22\x43\x9c\xea\x32\x3b\xc0\x65\xbe\x6f\x3b\xe9\x36\xd8\xb0\x1e\xbf\x2b\xc3\x9f\xf2\x5b\x37\xb7\xba\x30\x5b\x95\x9d\x14\xdd\x63\xf8\x63\xd9\x87\x86\x3f\xbe\x31\xea\x05\xa4\x20\x8c\xed\x0b\x8a\x13\x7e\xb1\x7a\x49\xf1\xb2\x2d\xdd\x17\xb5\xea\x9d\xe2\x35\x65\xb0\x3e\xa4\x22\x6e\x8d\x7e\x5b\xab\xe6\x4c\xf2\xc6\xf6\xa8\xac\xcc\xdf\x2f\xff\xb9\x98\xc1\xfb\xc5\xc5\xf2\x17\xf7\x6f\x1d\x5d\x4e\xe5\x44\xb6\xcc\xf4\xd5\x5b\x98\xbd\x22\x51\x70\xcf\xa2\xe3\x8c\xe3\x0b\x7e\x29\xfc\xa0\x9a\xcb\x36\x56\xa3\x09\x35\x8a\xc4\xb3\xc6\xdc\x5e\x7c\xd8\xdd\x1b\x7a\x50\x4b\xa5\x97\x4d\x72\x30\x1d\x7c\xad\x1e\xf9\x27\x6f\x1d\xbe\x47\x56\x4d\xaa\x49\xed\x92\xfa\xbf\x19\xbe\x0c\xd9\x36\x39\x44\x1b\x41\xba\xca\x68\x7a\x09\x0e\xc5\x41\x1e\x3e\x31\xc8\xd6\x41\xc4\x70\x61\x10\xb9\x0c\x1f\x59\x34\x87\x3b\x96\x6d\xc3\x87\xfc\xec\x90\x1a\x09\x06\x20\x06\x9f\x66\xdf\xab\x5c\x0a\xb7\x86\x66\xb0\x29\xea\x35\x00\xe9\x6c\x1f\x05\x2f\x10\x14\x7a\x21\x61\x66\x30\x37\xcb\xf5\x33\xcc\x1b\x75\xe4\xbf\x41\x16\xee\x0e\x51\x1e\xc4\x2c\x39\x64\xd1\x0b\x7f\x22\xcf\x99\x29\xf7\xf5\x90\x26\x3b\xc8\x9f\x13\x6e\x24\x8c\x82\xf4\x24\x62\xf1\x63\x5e\xec\xc4\xa5\x8f\x39\x83\x3f\xb1\xd3\xc7\xd3\x19\x3c\x33\xf6\xf5\x84\xef\x07\x4f\xf8\x5f\xa3\xa6\xe5\x63\xcc\xfe\x7c\xaa\x76\x42\xe9\xb0\xde\x27\x59\xc8\x47\x88\x04\x3e\x42\x21\x61\x3d\x6a\x33\x89\xa3\x76\xf8\x4a\xf4\x41\xc1\x29\x89\x4d\xb4\x38\xde\x97\x3c\xc0\xc5\xf9\xd5\x02\xf6\x91\xc1\x39\x1c\x3e\x36\x86\x87\xdb\xb7\xf9\xb7\xd0\x70\x7b\x66\
xb4\x59\x68\x0d\xb1\x5f\xb9\x79\xd3\x2a\xe6\xdf\x65\x13\xb3\x9d\xd1\x5a\xef\x33\xda\x7c\xf9\xaf\x10\x7c\x0b\x47\x1d\x33\x98\x7d\x8a\xf1\xc7\x03\xf9\xe9\x20\x7e\x38\x3a\x7d\x21\x3f\x1c\xc5\x24\x69\x72\xf3\x20\xe7\xbb\xe9\x6f\xe9\x96\x5f\x06\x53\x0d\x39\x6a\x84\x7d\xc3\x45\x5c\x77\x09\xc7\xbb\x4a\xf6\xc4\x29\xbc\x7f\xe1\x5f\x86\xe0\x10\xe5\x33\x08\x84\x27\x21\x30\xfb\x3c\xca\x2f\x47\xa1\x2b\xd1\x65\xa2\xe0\xf6\xc3\xfc\x62\xa1\x2c\xbd\x8c\xcc\xf2\x69\x61\x7e\x33\x83\x8b\xe5\xcf\x3f\xbc\x33\x59\x9f\x99\xae\xce\x4e\xba\x2d\x32\xaa\x25\x1b\x64\xf6\x53\xde\xe6\xc1\x5f\xbe\x4c\x3b\x51\x7d\x9e\x66\xa2\x32\x31\x8b\x98\xa8\x3e\xfb\x89\xea\x8f\x30\x51\x7d\xf6\x13\x95\x52\xbe\xbb\x89\xea\xff\x0f\x00\x00\xff\xff\x64\x1b\xfe\xae\xc0\x02\x0a\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x5d\x73\xdc\xb8\xb1\xf0\x7f\xbf\x9f\xa2\xeb\xe4\x5f\xe5\xa4\x4a\x92\xbd\xd9\x6c\x52\xf1\x73\x35\xb6\xc6\x6b\xd5\x4a\x1a\x3d\xd2\x38\xde\xad\x3d\xa7\x2c\x68\x06\x9a\xe1\x63\x0e\x39\x21\x39\x92\x75\x52\xf9\xee\xff\x02\xc0\x77\x82\x24\xba\x01\xca\x4e\x02\xde\xac\xd7\x1e\x34\x41\xbc\xa3\xbb\x7f\xdd\xbf\x83\xb7\xf1\xfe\x29\x09\x36\xdb\x0c\xfe\xf8\xea\x8f\x7f\x84\x9f\xe2\x78\x13\x72\x38\x3f\x7f\x7b\x02\xb3\x30\x84\x6b\xf1\x4f\x29\x5c\xf3\x94\x27\x0f\x7c\x7d\xf2\xdd\xef\xe0\xbb\xdf\xc1\x79\xb0\xe2\x51\xca\xd7\x70\x88\xd6\x3c\x81\x6c\xcb\x61\xb6\x67\xab\x2d\x2f\xfe\xe5\x08\xfe\xc6\x93\x34\x88\x23\xf8\xe3\xc9\x2b\xf8\xbd\xf8\xc1\x7f\xe5\xff\xf4\x5f\x7f\xf8\x3f\xdf\xfd\x0e\x9e\xe2\x03\xec\xd8\x13\x44\x71\x06\x87\x94\x43\xb6\x0d\x52\xb8\x0f\x42\x0e\xfc\xcb\x8a\xef\x33\x08\x22\x58\xc5\xbb\x7d\x18\xb0\x68\xc5\xe1\x31\xc8\xb6\xf2\x35\xb9\x10\x51\x8f\x5f\x73\x11\xf1\x5d\xc6\x82\x08\x18\xac\xe2\xfd\x13\xc4\xf7\xf5\xdf\x01\xcb\x54\x8d\xc5\xb3\xcd\xb2\xfd\xeb\x97\x2f\x1f\x1f\x1f\x4f\x98\xac\xed\x49\x9c\x6c\x5e\x86\xea\x97\xe9\xcb\xf3\xb3\xb7\xf3\xcb\x9b\xf9\xf1\x1f\x4f\x5e\xa9\x32\x1f\xa2\x90\xa7\x29\x24\xfc\xef\x87\x20\xe1\x6b\xb8\x7b\x02\xb6\xdf\x87\xc1\x8a\xdd\x85\x1c\x42\xf6
\x08\x71\x02\x6c\x93\x70\xbe\x86\x2c\x16\x35\x7e\x4c\x82\x2c\x88\x36\x47\x90\xc6\xf7\xd9\x23\x4b\xf8\x77\xbf\x83\x75\x90\x66\x49\x70\x77\xc8\x1a\xcd\x55\xd4\x2f\x48\x1b\x3f\x88\x23\x60\x11\xfc\xd7\xec\x06\xce\x6e\xfe\x0b\xde\xcc\x6e\xce\x6e\x8e\xbe\xfb\x1d\x7c\x3c\x5b\xbe\x5f\x7c\x58\xc2\xc7\xd9\xf5\xf5\xec\x72\x79\x36\xbf\x81\xc5\x35\xbc\x5d\x5c\x9e\x9e\x2d\xcf\x16\x97\x37\xb0\x78\x07\xb3\xcb\x5f\xe1\xe7\xb3\xcb\xd3\x23\xe0\x41\xb6\xe5\x09\xf0\x2f\xfb\x44\xd4\x3f\x4e\x20\x10\x0d\xa9\x7a\xef\x86\xf3\x46\x05\xee\x63\x55\xa1\x74\xcf\x57\xc1\x7d\xb0\x82\x90\x45\x9b\x03\xdb\x70\xd8\xc4\x0f\x3c\x89\x82\x68\x03\x7b\x9e\xec\x82\x54\x74\x67\x0a\x2c\x5a\x7f\xf7\x3b\x08\x83\x5d\x90\xb1\x4c\xfe\x4d\xe7\xa3\x4e\xbe\x0b\xa2\xfb\xf8\xf5\x77\x00\x59\x90\x85\xfc\x35\x5c\xc4\x51\x90\xc5\x49\x10\x6d\x5e\x9e\xb2\x74\x7b\x17\xb3\x64\xfd\x1d\xc0\x9a\xa7\xab\x24\xd8\x0b\x29\xaf\x61\xb9\xe5\xb5\xdf\x41\xf9\x3b\x48\x78\x1a\x1f\x92\x15\xff\x0e\xe0\xcb\xf1\x7a\x15\x1e\xa7\x59\x72\x58\x65\xc7\x11\xdb\xf1\xd7\x50\x97\xa7\xfe\x79\xcb\xd2\xe3\x80\xed\x5e\xc3\x3d\x0b\x53\xfe\xdd\x9e\x65\xdb\x54\xd4\x65\xc3\x33\xf1\x1f\xcd\x6b\xef\x0f\xd1\x4a\xfc\x9f\x18\x88\xb2\x27\x37\x5c\x8c\xbf\xfb\x38\xd9\xc9\x4f\x04\x76\x17\x1f\x32\x60\x8d\x97\x01\xec\x59\xc2\x76\x3c\xe3\x49\xaa\xe4\x1e\x43\xb7\x4a\xe2\x29\xc6\xcf\x6b\xc8\x92\x03\xcf\xff\xb2\x51\x89\x19\xdc\x1f\xc2\x10\x82\x28\xcd\xe4\x68\x8f\xef\x5b\x2f\x13\xc3\xee\xc9\xb4\xf6\xf2\xc7\xdf\x56\xfd\xd7\x3c\xe4\x19\x37\xfd\x00\xf5\xeb\xaf\x5f\xdf\x59\x18\x62\xab\x1c\x86\x86\x95\xde\x27\xf1\xff\xe3\xab\x6c\xa8\xca\xe9\x6a\xcb\x77\xec\x75\xfe\x7f\x00\xd9\xd3\x9e\xbf\x06\xb1\x56\x44\x9b\xef\x00\xc2\x20\x35\x1e\xd0\xe2\xb7\x9a\x11\xb1\x63\xd1\xd3\x33\xd5\x57\xac\xe5\x71\xc4\xa3\x4c\xca\x55\x3f\xcd\x5f\x51\x56\xa0\x28\x9a\xaf\x1a\xed\x7e\x55\xd3\x3b\x58\x97\x95\x49\x5f\xfe\xe3\x1f\xf9\x1f\xff\xf9\xcf\x97\xeb\xe2\xe7\xe2\xaf\x45\xa5\xff\xf9\xcf\x46\xc1\x3d\x4b\x78\x94\x1d\xaf\xe2\x48\x6c\x17\x3c\x69\x7f\x53\xb5\x7a\xac\x12\xce\x32\xde\xf8\x36\xed\xd2\x52\xff\xa7\x84
\xb3\xf5\x71\x16\xec\x78\x7c\xc8\x5e\xc3\xab\xc6\xbf\xc9\x09\xd9\xf7\x8f\x6a\xe4\x74\xff\x55\xb5\x5e\x7c\xa7\x6b\xf5\xfc\xff\x55\xdf\x94\xff\xb3\x0e\xd2\x7d\xc8\x9e\x2e\xeb\x7f\xd7\xfc\xc4\x7d\x12\xef\x79\x92\x05\x3c\xad\x7a\x69\x15\x87\x87\x5d\x74\xce\x9e\xc4\xdb\xcb\xbf\xd5\xbe\xbf\xaa\xf4\x26\xce\x97\xdf\xb7\xb5\xd2\xba\x5f\x29\x29\x65\x4f\xf6\xfc\xbc\x33\x80\x45\x27\xf1\x28\x53\xbb\xe3\x43\xb0\xe6\x6b\x08\xa2\x2c\x06\xfe\xf7\x03\x0b\xc3\x27\x48\xf7\x6c\xc5\xd7\x79\xdd\xe5\x9e\x24\xf6\x9f\x9a\x44\x80\xc7\x60\xbd\xe1\x59\x0a\x2c\xe1\xc0\x92\x84\x45\x1b\xbe\x86\x07\xf1\xf5\x2b\x21\xe2\xa4\x53\xdb\x55\x1c\xdd\x87\xc1\x2a\x4b\xeb\xad\x70\x0c\x9b\x24\x58\x77\x2a\x7c\x0c\xbb\x38\x65\xc1\x4a\xf3\x0f\x49\xfc\xd8\xf9\x5b\x5d\xbb\x57\x6d\xdf\xfa\xcb\xa2\xe9\x45\xa5\x9f\x5a\xff\xa2\x6d\xfd\xb4\xf5\x23\x4d\x6b\xaa\x66\x8a\xef\xcb\x86\x15\x4b\x96\x1a\x2e\x27\xda\x57\xa4\x3c\x5a\x1f\xf3\xdd\x3e\x7b\x6a\x4c\x83\xe6\x8f\xc4\xaa\x92\xf7\xb0\xf8\x63\xeb\x37\x41\xc6\x77\x9d\x4f\xeb\x1d\x57\xad\xef\x1b\x18\x37\xfa\x8f\xee\x6f\x64\xf5\x3c\x72\x71\x90\xd5\xfd\x4b\x51\xa5\x20\xca\xf8\x86\x27\xda\x5f\xa8\x85\x53\xfe\xe6\xcf\x7f\xd2\xfe\xa2\xd5\x33\x1f\xe5\xeb\xb4\xbf\xec\x74\x4f\xc2\x43\x96\x05\x0f\x3c\xaf\xa3\x3a\xc1\x06\x69\xde\x6d\x27\xb5\x2e\xcc\x7f\xa1\x15\x0b\x62\xb2\x94\x47\x80\xf5\xff\x3b\xa4\x99\x3c\x93\x3d\x06\xeb\x6c\xab\xba\x3e\x1f\x05\x91\x3a\xef\xad\x12\xce\x23\xf8\x7d\xf1\xf6\x1e\xa1\x59\x0c\x7b\xce\x93\xf4\x0f\x27\xf0\x93\x5c\x16\xd5\x49\x4f\x55\xe4\x08\x36\xf5\xbf\x2b\xde\x94\x95\x15\xee\x11\xda\xa8\xc2\x09\x9c\xdd\x43\xbc\x0b\xb2\x8c\xaf\x8f\x80\xc1\x03\x0b\x0f\x72\x4b\xfe\xbe\xfc\xa2\xc7\xad\xb8\x18\x24\x5c\x1c\x34\x83\x68\xd3\x1e\xb2\x50\x4d\xf7\xa1\x0e\xd6\x4d\x28\x7d\xe7\x29\x59\x66\xbd\x97\x4f\xa3\xda\x7a\xd3\x59\x6b\xc4\xcd\xa0\xde\xa3\x03\x75\x18\x99\x77\xf5\x9f\x0e\xcf\x3e\xf5\xf4\xcc\xc1\x7a\xab\xf4\xcc\xc4\x56\xc3\x8c\xcf\xc7\xa1\x46\x1b\x9b\x9c\xea\xb9\x0b\x59\xf4\xb9\xff\x9f\x8d\x2a\x0c\xdd\xde\x7c\x23\xc4\x9a\xfc\xdc\xf8\x1b\xc7\x24\xb6\xce\x9a\xf2\xbb\xd4
\x96\xa5\xef\xfa\x7a\x3d\xb4\xbb\x50\xfb\x39\x86\x2f\x4f\x6f\xb7\x2c\x19\x6a\x85\x63\x48\x57\x71\xc2\x57\xd5\xf9\x49\xff\xab\x8c\x7f\x19\x16\x13\xc6\x9b\xf4\x8a\x45\x3c\xec\xfd\x55\xf9\x0b\xf7\x9d\x77\x3e\xfa\x72\x4a\x07\x9a\x4b\xfd\xf6\xba\xe3\x6e\x64\xf0\x99\xcc\x34\xf1\xdc\x07\x61\xc6\x93\xe1\xdf\x74\xee\x1c\x43\x4f\xab\xe7\xde\x49\xf9\x23\x65\xda\xb7\x32\x59\x06\xb2\x2d\xcb\x60\xb5\x8d\xe3\x94\xa7\x62\xed\x5f\x6d\xc5\x10\x03\x1e\x65\x49\xc0\xfb\x56\x98\x5a\x9d\x63\x48\x78\x76\x48\xa2\x13\xa9\xec\xf8\x6d\xb6\x7e\x10\xb7\xbc\xb5\x1c\x4c\xf0\x7f\x0f\x5c\x48\xf9\x9f\xdf\x6f\xb3\x6c\x9f\xbe\x7e\xf9\x72\x15\xc6\x87\xf5\xc9\x46\xea\xbd\x4e\x56\xf1\xee\x65\x18\x6f\x36\x41\xb4\x79\xb9\x8e\x57\xe9\xcb\x87\x80\x3f\xbe\x64\xb9\x84\xe3\xbf\xab\xc2\x7f\x18\x9a\xc7\xea\x59\x44\xe1\x53\xbd\xd6\xea\xa3\x76\x2c\x5b\x29\x3d\x56\xfe\xa9\xe2\x74\xaa\x2a\xcb\xd7\xe3\x42\x67\x11\xc8\xbd\xa1\x28\x2d\xc5\xf1\x54\x5e\x3b\x6b\xef\x1a\x16\x54\xe8\x53\xc4\x3d\x61\x64\x8c\x8c\xef\x9c\xd5\xd3\xea\xfe\xeb\xfa\x5b\x30\xa3\x40\xec\xa9\x42\x84\x3c\xae\xe6\x5d\x51\xd6\x39\x15\x9d\xbb\x8a\xc3\x70\x78\x11\x51\x8f\x58\x96\xc4\xc9\xed\x04\xde\x1e\x12\x71\xf7\x0b\x9f\x20\x16\xdd\x52\x5c\x1f\x65\xeb\xa7\x87\xfd\x3e\x4e\x32\xbe\x16\xe7\x90\x51\x99\xb2\xf9\x8f\x8a\xe3\xce\x86\x67\xf0\x18\x84\x21\xac\xf9\x3d\x3b\x84\xf2\x58\x2d\xfe\x69\x1b\xa7\x59\xf1\x96\xb1\x5e\x45\xec\xfb\xed\x42\x26\x27\x80\xea\x19\x3c\x0b\x54\x0f\x62\xc6\x77\x96\x5e\x54\xa9\x84\xdf\xf3\x84\x47\xab\xf1\x41\x28\xaf\x55\xf9\x08\x78\x0d\x6f\xc5\x7c\x2d\xfe\x77\xc7\x22\xb6\xe1\xc9\xcb\xab\xc6\x3d\x77\xe8\xb9\x0f\x78\xb8\x7e\x5d\xbf\x39\x77\x9f\x72\xa9\x76\xbf\xa1\xdd\x18\xec\x02\xf8\x0d\xcd\x44\x6a\x6b\xad\x2d\xbf\x11\xd2\xc3\x6e\xc7\x92\xe0\x7f\xc5\x44\xcb\x82\x1d\x87\x54\x2e\x72\xb0\x66\x19\x7b\xd6\x03\x8b\xf5\xde\x67\x72\x58\xe9\xaa\x51\x7a\x2a\x13\xec\xf8\x8d\x6c\x08\xb1\x63\x0c\xad\x7e\xa6\x3b\xee\x86\x1d\x36\xfc\x6f\x01\x7f\x34\x5b\x74\x47\x47\x16\x74\x47\xd7\x4f\xc5\x2b\x4c\x8b\xe1\x47\x98\xe9\x2b\x1a\xc3\xed\xa3\x58\x24\x57\x4c\x19
\x7a\x78\x6d\xec\x65\x31\xa4\xdb\xf8\x11\x98\x6a\x9d\xd1\xf9\xbb\x12\x83\x68\x6c\x49\x35\xed\x10\x90\x9b\xc4\x23\x4f\xde\xc4\x87\x68\x70\x38\x14\x8f\x6a\xae\xe8\xb0\xbb\x1b\x3d\xda\x40\x4d\x6d\xb0\x8e\x0f\x77\xe1\xf8\xc7\x69\x4e\xbf\x45\xe5\x0c\xca\x76\x76\x51\xf9\x69\x70\x27\x8a\xe7\x56\x9e\x20\x55\xcd\x6c\xd6\x8c\xea\x11\x92\xca\x2b\xb9\xbc\xd9\x8b\xb2\xa2\xd7\x0e\xe1\x1a\x58\xf8\xc8\x9e\x52\xb8\xe3\x85\x26\xc0\x48\x66\xb6\x65\x11\xc4\x89\x52\xe7\xa9\x4d\x33\x18\x39\xb5\x88\xe7\xb0\xdf\x7f\xbb\x7d\xf5\xa1\xac\x1c\xa5\xaf\xe4\xa7\x3d\x5b\x5f\x49\xb3\xa6\xe8\x04\x23\xa9\xb8\x8e\x4a\xf7\x2c\xf9\x2c\xd7\xfa\x69\x97\xba\x9b\xc6\x7b\xa6\x5b\xef\x50\xef\xc1\x2e\x7a\xb2\xb1\xdc\x2c\x7a\x26\xbb\x1a\xa8\x8b\x68\xf9\x45\xcb\xa7\xfd\xd8\x48\xc7\xac\xa5\xbb\x20\x9a\x85\xc1\x26\xda\xf1\x28\xbb\xe2\x49\x10\x23\xe6\xa9\xe1\xd1\xb1\x33\x0c\x2e\x3a\xef\xb4\x5d\x2a\xe3\x48\x9e\x7c\x60\x1f\x07\x51\x06\xf7\xa2\x5d\x79\xb4\x1a\xbb\xfe\xa8\x27\x88\x6a\xd3\x2e\xd8\xed\x43\x2e\xea\xa5\x5c\x07\x94\x81\xfd\x49\x9e\xb2\xb6\x5c\x34\x56\xb0\x3b\xec\x8c\xc4\xb2\xe2\x0b\x61\x2f\x3f\x51\x8c\x20\x31\xac\xa4\xd7\x43\xfd\xc8\x26\xee\xa7\x1d\x8d\xbe\xfe\x79\x27\xa6\xf5\x17\x26\xea\x78\x04\x81\x5a\x2e\xe4\x77\x07\x29\xec\x0f\x77\x61\x90\x6e\xa5\x3f\xc2\x8a\x03\x7f\x18\x3e\x00\x55\xcf\xf7\xaf\xc4\x77\x1d\x32\x9e\x42\x90\xc1\xa3\x5c\x76\xa2\x58\xdc\x7b\x3f\x8b\x3a\x46\x62\x2a\xc4\x70\xcf\xc5\x2d\x98\x19\xad\x94\xf9\xc7\xab\xaa\xb1\x0c\xe2\x88\xe7\xaf\x90\x0a\xfb\xe4\x81\x85\xe9\x09\x2c\x95\xf7\x08\x0f\xcd\x44\x06\x29\xc4\xb2\xf7\x59\x28\x6d\x47\xfc\x4b\x90\x66\xa9\xba\x1c\xb2\x14\x18\x6c\x83\xc8\x60\xc9\x6d\x4e\xa4\xe9\xc7\xfa\x0d\x66\xe2\x82\xcb\x65\xaf\xf9\xe6\x79\x64\x34\x6e\x1b\xb3\xec\xc5\x75\xbe\x3e\x29\x7b\x86\xa8\x90\xd8\xa4\x64\x13\xaa\xe9\x92\xc5\x46\x7d\x27\x57\xce\x52\xa1\x5e\x54\xf0\x04\xae\xe2\x34\x0d\xee\xc2\x7c\x03\x4c\x5f\xc3\xcd\xd5\xec\xfa\xe7\x4f\x6f\xdf\xcf\xae\x97\x9f\x96\xbf\x5e\xcd\x3f\x7d\xb8\xbc\xb9\x9a\xbf\x3d\x7b\x77\x36\x3f\x3d\x32\x7a\x95\x92\x70\x7e\x76\x39\x3f\xca\xff\xfc
\x66\x76\xfd\xc2\xa0\x28\x8f\x0e\x3b\x93\xe1\x70\x3c\x58\x49\x84\x00\x51\x47\xc4\xcf\xdf\xcc\xae\x07\x7f\x9d\x6d\x13\x9e\x6e\xe3\x70\x3d\x9d\x7e\x68\x59\xbe\x02\xb3\xa5\xbe\x90\x63\xa7\x2c\x5a\x73\xbf\xc8\x78\xb2\x0b\xa2\x7c\xa7\xcd\x58\x36\x3e\x3d\xf2\x23\x52\xb5\x2d\x6f\x82\x07\xae\xd6\xef\xda\x92\xfa\xe2\x05\xac\x94\xf6\x48\x0d\xac\xf1\x55\x41\x2c\xac\x2c\x02\xb6\xca\xc4\x99\x49\x1d\xc7\xbe\x1c\xb5\xde\x15\xa4\x6a\xf1\x5e\xb3\xa8\xcf\xe6\x58\x7f\xe4\x17\x89\x55\xfa\x8b\x28\x59\x1e\xdf\x1a\x47\xb3\x42\x58\xd5\x3e\xe3\xba\x9e\x2d\xcb\x20\x4b\x82\xcd\x86\x27\xe2\x60\x18\xc6\x8f\x47\x42\x66\x65\xd5\x6b\xbf\x63\x54\x64\xbb\x0e\xad\x77\xb0\xbb\xf8\x81\x9f\xc0\x4d\xb0\x0b\x42\x96\x84\x4f\x62\xeb\x19\xd7\x17\x89\x8f\x96\x25\x5f\xca\x3a\x02\x83\x47\xa6\x3c\xd3\x7a\xde\x33\x2a\xb2\x26\x4d\x76\x4d\xd4\xd3\x3f\xc5\x7b\xcc\x86\xd4\x31\x1c\x94\xcb\xe0\x17\x60\x61\x1a\xc3\xfe\x90\xc9\x8d\xb0\xd6\xd5\x4a\xd2\x09\xfc\xfe\xd4\xac\xe7\xb3\xe4\xb0\xdb\xa7\x45\x3d\x4e\xfe\x00\x30\x4b\xc5\xf0\x2a\xb7\xee\x55\x1c\xa5\xc1\x9a\x27\xe2\x34\x69\xa0\x05\x52\x4f\xe9\x4e\x79\x1f\x87\x61\xfc\x28\xbe\xf0\x3e\x3e\x24\xf5\xa9\xf5\x0f\x00\x35\x78\x5f\xc3\x5f\x5f\x1d\x89\x53\x28\xcb\xf8\x26\x4e\x9e\xc6\x17\xb6\x17\x2f\x4e\x67\x97\x3f\xcd\xaf\x5f\xbc\x38\x92\x5f\x20\xfb\xe4\x35\xbc\x78\x31\x7b\xb3\xf8\xdb\x5c\xfc\xed\x3f\x8f\x5e\xd7\x5f\xf0\x17\xf4\x0b\x3e\xce\xae\x2f\xcf\x2e\x7f\x1a\x7a\x43\xfd\x05\xdf\xbb\xfa\x82\x37\xf3\xf3\xc5\xc7\xae\xfc\x3f\x3a\xfb\x80\xea\x05\xf2\x3a\x17\xbd\xce\xf7\xb3\x71\x45\x86\x76\x51\xf8\xfe\x55\x7e\x0a\xdb\x1f\xb2\xf6\x10\x1f\xbf\xfd\x31\x50\xcd\xa0\x06\xed\x51\x5e\x95\xe6\xf2\xf0\xfd\x2b\xb8\x3b\x64\x88\x2b\x65\xbd\x7a\x7f\x7c\x05\x0c\xf2\xa6\x68\xbd\x44\x1c\x92\x56\x59\xf8\x04\x77\x3c\x7b\xe4\x7c\x5c\xae\x90\x15\xad\xe1\x2f\xe2\x3f\xb0\xf8\x79\xa8\xce\x45\x15\x4c\x4c\x49\x7f\x69\x7d\x1f\xfc\x55\x53\x67\xf1\x62\xc3\x7e\xea\x5d\x5b\xa5\xdc\x7a\x7b\x9f\x8c\x1d\x35\xbe\x4d\xd3\x81\xd1\x65\xde\xea\x6c\x6a\x7c\x76\xc0\x5d\x5f\xa5\x3f\x56\x3c\x6a\x12\xad\x7f\xab\xf1\x49\x5e
\xe7\xba\x15\x9b\x29\xcb\x9c\x34\x94\x7c\x9d\xe1\xb9\x5d\x77\xe6\x52\x07\x10\xd9\x3e\x95\x7e\xaa\xdc\x2d\xcc\x6e\x9c\xa0\x3e\x5a\xec\xad\xe2\x4e\xc8\xc4\xce\x23\x1d\xfb\x80\xc1\x2f\xca\x3c\x50\x1d\xe3\x0d\x25\x16\x87\xfd\xb7\x8b\xf3\xc5\x75\xe3\x84\x0f\x3f\x5d\xcf\x7f\x3d\x82\x37\xe7\x1f\xe6\xf2\xcf\xf3\x4b\xb3\x43\x3f\xc0\xaf\xf3\xf3\xf3\xc5\xc7\x23\x58\x5c\x8b\xc9\x78\x04\xd7\xf3\x53\x93\x43\xbf\xf9\xb1\x5f\x1c\x14\x3a\x15\x36\x2c\x27\xbe\xca\xf0\xa7\xe2\xd3\xcd\xa5\xce\x2f\x0d\x7f\xab\x9a\xc7\xf0\xc7\xaa\x0d\x0d\x7f\x7c\x6d\xd4\x0a\xeb\x20\xe1\xd2\xbb\xf9\x79\x26\xea\x69\xf1\xba\xe7\x9b\xac\xe5\x2b\x2d\x26\x6c\xd9\x4a\x25\xe0\x51\x5c\x63\xd0\xb3\xb6\xac\xce\x24\x33\xf7\xf4\xec\x7a\xfe\x76\x79\xb6\xb8\x6c\xce\x5e\x79\x90\x3b\x02\x75\x1c\x72\x3e\xfb\xb4\x2f\x35\x2c\x2b\x6b\x66\x3a\x05\xe7\x66\x53\x25\x64\x77\xc3\xde\x53\xd5\x63\x3b\x9e\xcf\xc5\xab\xf0\x83\x6a\xa6\xea\x58\x8e\x26\xd4\x28\x52\xa7\x64\xc4\xe7\x19\x5b\x6d\x08\x76\x9b\x4e\x8b\xfc\x4d\xd4\x0e\xdf\x22\x1d\x63\x4b\xd5\x24\xd5\xbf\x19\x4e\x86\xdc\x42\x73\xc7\x61\xcd\xef\x83\x48\xcd\xad\x4c\x7a\xc2\x48\xcf\xe0\x74\xc5\xc2\xe2\x3d\x86\x22\x77\x5c\x1c\xa0\x87\x7b\xa7\x65\xe3\x9e\xce\x58\xb3\x34\x36\xa6\x83\xe5\x22\x8a\x7a\x51\xa3\x3f\x2b\x05\xe5\xbb\x80\x8b\x1b\xb0\x18\xea\x52\xa1\xde\x72\x8c\x30\xd0\x81\x64\x0c\xee\x93\x78\x27\xbb\xf0\x26\x63\xab\xcf\xeb\x24\x78\xe0\x49\xde\x29\x29\xcc\xae\xce\x5c\x1a\xb1\xab\x7e\x7c\x67\xe4\xd5\x07\xd8\xc3\xfa\x40\x87\x1a\xf9\xf9\x81\xd3\x5e\x25\xbc\xbc\xd1\xd3\xaa\x54\x0d\x75\xaa\x8c\x12\xb5\x6e\x36\x59\xd8\x4c\x8d\x6e\x20\x77\x83\x7b\xd3\xda\xe2\x6e\x2d\x00\x6c\xb3\x49\xf8\x86\x99\x1f\x8a\xd0\xdd\x0f\xdd\x21\x30\xab\x5e\x8a\x15\xe0\x7c\x04\xe0\xeb\xd2\x18\x10\x6f\x9e\x0a\x0f\x3e\xa5\xa7\x4d\xd8\x23\x6a\xc2\x97\x42\x73\xfb\x59\xe9\xd1\x09\x1f\x2a\xb4\x99\x87\x6b\xe5\xbd\xb8\xbb\x0b\x22\xd3\x55\x1c\x60\x77\x08\xb3\x60\x1f\x36\x94\xd2\x72\x69\x5a\x07\xf7\xd2\x81\x2e\x83\x87\x80\x3f\xa6\x10\x8f\x2b\x52\x8b\xa7\xb0\xf5\x99\x9e\x00\xb1\xe3\x11\xea\x36\x4b\x73\x4b\x70\x59\x3d\xec
\x11\x47\x3d\xed\xf1\x89\x36\x0b\x17\x4f\x67\x9b\xbf\x2d\x3f\xe6\x93\x32\xc0\xde\x16\xb8\x34\x62\x70\x80\x54\xa8\xc9\x5e\x2c\x2c\x97\x47\x62\x9b\x4f\xf9\x2a\x8e\xd6\xe9\x91\xd2\x60\xe7\xc8\x0b\x4a\xaa\x84\xb9\x1e\x82\x35\xaf\x19\x71\x23\xe9\x14\x2c\xfe\xff\xb7\xda\xc0\xf9\x9f\xdf\x72\x3f\xe7\x5d\x49\x5a\x9f\x3c\xfc\x70\x52\x4d\xa7\xff\x41\xbd\x58\x32\x79\x52\x01\x9d\x4a\xaa\xec\x2e\x8c\x57\x9f\xa5\xe3\xae\x78\x67\x6e\x96\x7d\x0c\x42\xb3\xf3\x66\xf1\x88\x53\x50\x1c\x71\xb8\xe3\xf7\x71\xa2\xbe\x69\xcf\x93\xe3\x7c\xe8\xcb\xae\x30\x3c\x1b\x16\xcf\x8a\x45\x42\xaa\x84\xf7\x95\xc1\xa8\x9c\x01\x80\x3e\xab\xa9\x67\x77\x48\x33\x29\x33\x83\x90\xb3\x34\x83\x3f\xbf\x2a\x7a\x52\x02\x4d\xac\x56\x67\xdc\x10\x51\xdf\x07\xb1\x64\xf8\xa5\x96\xf0\x76\x76\x7e\xf6\xd3\xe5\xa7\xcb\xc5\xe5\xfc\x56\x0c\x90\x62\xec\xad\x4d\x35\x1a\xea\xa9\xad\x43\x41\x2d\xaa\x81\x32\x57\xf1\x24\x91\xaa\x19\x94\xc4\x6a\x9d\x3b\xbb\x87\x28\xd6\x74\x13\x56\x62\xf5\x69\x90\x5f\x34\x0a\x41\xf5\x46\xc0\x0d\xd2\x7a\x83\x15\xb6\x9e\x7a\x4b\x04\x9b\x28\x4e\x4c\xdc\xef\x8b\x67\x95\xc4\x69\xaa\xe6\xcb\x35\x5f\x1f\x56\x66\x07\xae\xe2\x71\xb3\xba\xbd\xed\xd4\x81\x22\x6b\xca\x8d\xb8\x5b\x43\x63\x75\x86\x7a\xba\x4a\x8d\x44\xc8\x91\x9a\x08\xb1\x1f\x29\x92\xdd\xd0\x6d\xa0\x78\xee\x78\x69\x34\xce\x77\xe2\xc6\xc6\x2a\x57\xb4\x71\x2b\x67\xfd\x49\x83\x68\xd3\xdc\x9e\x8f\xe0\x71\xcb\xf3\x95\xab\xb8\x21\xa2\x44\x72\xb6\xda\xd6\x9d\x90\xf2\xcb\x60\xc2\x53\x71\x18\x88\x36\xd8\x73\x09\xa8\x69\xc0\xaa\xc0\x00\xf1\x7d\xb9\x3f\xb0\x30\xe1\x6c\xfd\x94\x4f\x34\xdc\xce\x93\x1b\x57\xf2\xfa\x05\x91\xb4\x32\xd5\x8e\xd0\x00\x97\xb1\xe9\xf1\x52\x3d\xa2\x56\x89\x1a\x2e\x55\x27\xa7\xad\x15\x1c\xbb\x3b\xca\x4f\xad\x57\x2b\x5f\xf3\x83\x35\xac\xb6\x71\xb0\x42\x36\xe5\x9a\xef\xb9\x72\x14\x13\x5f\x7d\xab\x2e\x75\x9f\x3e\x07\xd1\xfa\xb6\x40\xd1\xe1\x56\x36\xcd\x27\x31\xc1\x70\xcb\x55\xae\x4c\x88\x93\x60\x13\x44\xac\x55\xef\xeb\x62\x02\xa0\xb7\xbf\x27\xb9\xda\x35\xbd\xc5\xa4\x15\x98\x55\x07\x49\x94\xd0\xe6\x67\xc7\x49\xe3\x8b\xd5\xbe\x65\xae\xac\x50\x8f
\x6e\xfc\x2c\x5b\x40\x82\xdc\x78\x51\x52\xef\x83\x24\xdf\xaa\xd5\x00\x87\xdf\xa7\x9c\xc3\xed\x9e\x27\x9f\x94\xdc\x4f\xf9\x16\x73\xfb\x07\x64\x6d\x21\x4e\x64\xe4\x1b\xb9\xf1\xdd\xc7\xc9\x4e\xed\x0c\xc7\x94\xab\x03\xc8\xed\x34\xef\x5d\xb9\x9f\xde\x4a\x61\x45\x1d\xf3\x39\x71\x6b\xb1\xa1\xca\xad\x4f\xf7\xdd\xf8\x36\xbd\xe3\x75\xc1\x62\xcc\xcb\xf3\x50\x14\xcb\x86\xae\x6f\xd5\xe6\xbb\x2a\x28\xc0\x4d\x73\xd6\x96\xb2\xa5\x43\x45\xfd\xbd\xff\x07\x37\xad\xc4\x71\xea\x31\x48\xa5\xb1\xb6\x3c\xed\xd4\x4e\x30\x48\xf5\xb5\x7a\x0a\x25\xf6\xf5\xfc\xf4\xc3\xdb\xb9\xfc\xe2\xa3\xe2\x7f\x2e\xe6\xb3\xcb\xea\x7f\xce\x8c\xcd\x4f\xea\x29\xca\xcd\x7e\x29\x65\xdc\x7c\xb8\xa8\xfe\xbc\x3c\x3d\x9d\xff\x8d\x24\xf2\xed\xe2\xc3\xe5\xf2\xa8\xf1\x7f\x9f\x96\xd7\x1f\xe6\xad\xbf\x7a\x37\x3b\xbf\x99\x93\x5e\xf0\xee\x7a\xa6\x74\xeb\x0d\xa9\x57\xf3\xeb\xb7\xf3\xcb\xe5\xd9\xf9\xfc\xd3\x5f\xff\x4a\x92\x5b\x97\xf0\xa3\x4e\xee\x8f\xaf\x6c\xe5\xbe\xaa\xe4\x96\x5f\x71\x3e\xbf\xb9\xf9\xb4\x7c\x3f\xa3\x76\xe0\xcf\xf3\x4f\xa7\x67\x37\xcb\xeb\xb3\x37\x1f\x84\x3c\x33\x63\x86\x7a\xcc\x4d\x1a\xea\x39\xae\x0f\x44\x4a\x39\x31\x66\x49\xe5\xce\x68\xc5\x66\xbf\x50\x8a\xdd\x7c\xb8\x20\x15\x93\x73\x86\x52\x52\xce\x08\x72\x41\x39\x0f\xe8\xa5\xe5\x44\xa4\x14\x6f\xcc\x43\x8a\x80\xc6\x94\xb5\x15\xf0\xa3\xa5\x80\x1f\x5f\x59\x0a\x78\x45\xaa\x41\x77\x19\xa0\x0d\xf4\xd6\x2a\x60\x2c\x64\x93\xc4\x87\xfd\x9b\x27\x65\x92\xc0\x5f\x33\x4d\x3c\x73\xeb\x4f\x9b\x27\xac\xbf\xdd\x46\x83\x96\x72\x19\xd7\xe6\x5e\x59\x56\xc4\x89\x29\x51\x11\x25\x51\x6b\xea\xa3\x3c\xc1\xf4\x9d\x8b\xaa\xa3\x01\xee\xd0\x21\x35\x7c\xb2\x9d\x3f\xdd\x3d\x7d\x52\x55\xbc\xad\xb9\x14\x6f\xe3\x47\xf4\x51\xb6\x7e\xcc\x66\x09\x87\x3d\x4b\xb2\x40\x34\x47\x11\xc9\x2a\x3d\xdc\xa5\xfd\xe1\x52\xf4\xcf\x3e\x09\xe2\xa4\x8c\xf0\x57\x80\x24\x35\xe5\x7f\x75\x63\xc2\xb5\xc0\x5c\xdc\x37\x55\x8d\x20\x8f\x8f\x96\x36\x3e\x21\xdb\x32\xdc\x01\x71\xcb\x1e\x72\x5f\x6c\xb6\x2b\x2e\xc1\xf7\x71\xa2\x6e\xb6\xf9\x15\x47\x36\x39\x4e\x09\x92\xe3\xdb\xe9\x89\xaa\x72\x10\x49\xbd\xe7\xa1\x79\x4d\xc2\x1e
\x91\x19\xec\xf8\xee\x4e\xdc\x37\xef\x81\x7f\x61\x2b\x15\x27\x80\xe7\x0d\xa2\x2e\x8b\xfa\x41\x87\xbe\x80\x57\x0a\x48\x5e\x6b\xf2\x5c\x5f\x8a\xb0\x3b\x55\xcf\x59\x56\x78\x65\xec\x0b\x00\x42\xc6\xbf\x10\x15\x04\x26\x6b\x4d\xbc\xe0\x15\x88\xbd\x5c\x49\xd2\x23\x48\xe3\x86\xde\x6c\xb7\x0f\x83\x55\x90\x85\x98\xd5\x05\xaa\xd1\x75\x5b\x88\x3f\x91\x17\xc6\x13\x28\xac\xae\x11\x52\x61\x50\x4e\x7a\x71\x21\xeb\x4e\x63\x19\xfe\x2d\x9f\x21\x48\xc5\x01\x7b\x64\x4f\x27\x20\xaf\x62\x1d\xb1\x79\x9b\x97\x2f\xc7\x09\x8e\xd6\xa5\xf6\xa5\x3e\x6e\x1b\x93\x86\xde\x57\x35\x87\xf6\xd6\x32\x84\xab\x65\xd9\x6a\xb9\x56\xac\xd0\x74\xc5\x87\xac\x7d\x4f\xc7\x0d\xd9\x81\xbb\xad\x6c\xd5\xdc\x07\xc2\x46\xbb\x8d\xd6\xe9\x12\xbd\x69\xbb\x02\x70\x9e\xb5\xf5\xc7\xd0\xcb\xb6\xfe\x10\x75\xc9\xd4\xc0\x1d\xc5\xb3\xe7\x89\x52\xec\xce\x94\x06\xe1\xf9\xd5\xdf\x57\xad\x1a\x7c\x6b\xca\xef\x76\xfd\x6c\x54\xdf\xb3\x08\x6e\x67\x85\xaa\x46\xfd\xcb\x9d\x58\x2d\xe2\x31\x1a\xb9\xf9\x64\x31\xdc\x25\xc5\xa1\xa1\x52\x2e\xe7\xa0\x4b\x4d\x8b\x8d\x5b\x7e\x6b\x7a\xf3\x8c\xef\xf6\x71\xc2\xc2\xca\xf6\x7b\x02\x73\x19\xcd\x1b\xb7\xc5\xc7\x4d\x6b\xcf\x91\x5c\x2c\x4b\x99\xa9\x62\xaa\xd1\x5a\x65\xdd\x67\xeb\xd4\x4d\xe8\x46\xe5\xb0\x63\xd9\x96\xef\x58\x11\xee\x4f\xee\x16\x72\x9b\xdf\xf0\x6c\xcb\x13\xdc\x42\x56\x69\xf9\xeb\x1d\x53\x47\x92\xf3\x73\x14\x4e\x5d\xdb\x55\xab\x49\xf5\x2f\x8f\xd6\xb2\xd3\xd3\x8c\xed\xf6\xb8\x36\x55\x40\x89\xd4\x84\xdf\x17\x56\xda\x20\x5e\x2b\xa5\x7f\xb3\xcf\x70\xaa\xba\x4a\xed\xbf\x63\x4f\x2d\xc3\x6d\xa1\xca\x27\x0c\x52\x8d\xda\xbf\xa9\xca\xb7\xd0\x7f\x8b\x2d\xbd\xa1\x00\x47\xf9\xea\xe5\xdf\xad\x55\xf9\x97\x6e\x0c\x52\x8d\xbf\xda\xb2\xc8\x20\x4c\x4a\xfd\xe9\xda\x28\x72\xcb\xaa\xad\x89\x62\x58\x51\x4f\xb3\xa6\xe7\x2a\xfa\x61\xdd\x3a\x65\x81\x32\xd1\xad\xdb\x9a\x95\x75\xba\x75\x52\x33\x54\x67\x5a\x31\xae\xc4\x91\xa8\xa4\x84\xc8\x66\x70\x39\x42\x7b\x74\xeb\xd3\xa9\xd5\x27\x55\x76\x56\x4d\x41\x28\x76\x3a\x3f\x5f\xce\x08\xe5\xae\x67\x4b\xca\xeb\xce\x2e\x97\xf3\xeb\xab\xc5\x39\xad\xf4\xe5\xfc\x97\xe5\xa7\xc5\xf9\xe9
\x7c\x98\xdb\xd6\x17\xc6\x2a\x67\xf3\x52\x48\xdd\x6c\x5e\x0a\xab\x40\x56\xc5\xf0\x0a\x56\x55\x0e\xab\x09\xce\x4b\xe1\x15\xc1\xb5\x6a\xe2\xd5\xa9\xf5\xc2\x78\x6d\xae\x2a\x4d\x57\xe6\xaa\xf2\x74\x5d\x6e\xb7\x3c\x4e\x91\xda\x29\x8f\xd4\xe4\x76\xca\x23\x15\xb9\xc5\x70\xa6\x6a\x60\x3b\x75\xf8\xf4\xf6\xbd\x31\xd4\x64\x16\x65\xb5\x78\x08\x77\x23\x4a\xd4\xd5\xe2\xe9\x71\x7f\x17\xa7\xa4\xdf\x2a\x07\xc1\xfc\x1b\x06\x02\xa6\x56\xbf\x7d\xc9\xf6\xc1\xcb\x87\x1f\x5e\xaa\x22\xa9\xb9\x41\x5d\xf9\x3e\xae\x79\x94\x49\xa7\x4a\x15\x21\x47\x9e\x59\x0a\xf5\x53\x19\x02\xd4\xfc\x34\x2d\xb6\xbc\x32\xd6\x67\x16\x63\xa2\xe3\xec\x83\xd5\xe7\xf6\xf5\xee\xf9\x5c\xab\xaf\x34\x6f\xc7\x4a\x72\x7e\xbb\xb5\xa8\x54\x73\xa0\xb1\xe8\xb3\x18\x56\x77\x4c\xfa\x61\xd5\xbd\x9a\xa5\xcc\x29\x9d\x92\x91\xec\xa0\x7a\xdc\x68\x2c\x70\x1c\x61\x4b\xc4\xb3\x74\x26\x1e\x3b\x54\x4f\xd3\x4f\xef\x7d\xfc\x58\x84\xa7\x52\xae\xf4\xaa\xb7\x91\x6e\x7a\x29\x0f\xf9\x2a\xeb\xd8\x20\x60\xcf\x24\x86\x9f\xc4\x87\xcd\x16\x7d\xc1\xca\xc7\x57\x37\x54\x90\x9e\x41\xc4\x19\x91\x16\x57\x47\xf0\x66\xb1\x5c\x2e\x2e\xa6\xb5\xf0\xd3\xd1\xc5\x42\xc2\x72\x71\x85\xfa\xbd\xfa\x2a\xe3\x22\xd1\x61\x57\x8d\x2c\xfc\x3c\x1b\x4a\xc3\xa1\x7f\xc6\x93\x73\xe8\x9f\xd6\x04\xbd\xac\xd7\x9b\x3a\xfc\xc5\xe8\x97\x59\x95\x1a\x23\x37\x56\xc8\x2c\x56\xb3\xd4\x1e\xec\xc8\x05\x12\x8a\xc9\x77\xc1\xb3\xed\xd7\x20\x30\xae\xeb\xaf\xff\x66\x97\xbd\x46\x2d\xad\x96\xbe\xdb\xbc\xbd\x3f\xed\xa4\xac\xdb\x9a\xc9\x0f\xdb\xf5\xd2\x3c\xd8\x74\x4c\x56\xaa\x2a\x15\xc6\x5c\x1a\xd0\xe3\xf5\x61\x85\x57\x05\x29\x5b\xac\x8a\xaa\x2f\x63\x97\x37\x3d\xa2\xf7\x58\x33\x51\xdb\xcc\x94\xc5\x05\xba\x40\xb5\x0f\x75\x96\xe7\x8b\xf9\xf2\xfd\xe2\xb4\xc9\x87\xe7\x7f\x27\x9d\xec\x50\xd2\x8b\x82\xb3\x5f\x2a\x21\x67\x97\xe5\x9f\xa5\x93\x5d\xfe\x67\x71\x59\xbf\x59\x4e\xbb\x9c\x77\xbf\x8d\x52\x1c\x7d\xed\xae\x5a\x81\x54\x0c\xa9\x51\xa8\xda\x96\x52\x4c\x75\x83\x51\x49\x45\xe0\xb0\xe4\x69\xf6\x35\x88\xc8\x1b\xcd\xdb\xbf\xfa\xb1\xdd\xa2\x52\x4d\x2b\x94\xcc\x2a\xc9\xf2\x26\x6e\x78\x9d\xb0\x7b\xf3\xab\x00\xc0
\x6d\xad\x68\x7d\x89\xf4\x30\xa2\xf6\xf1\x30\xa2\xe1\xe3\x61\x44\x0f\x23\x7a\x18\xd1\xc3\x88\xc3\x5b\x9f\xa7\x12\x3d\x95\xe8\xa9\x44\x4f\x25\x9a\x3f\x9e\x4a\xf4\x54\xa2\xa7\x12\xd5\xe3\xa9\xc4\x8e\x5c\x4f\x25\x8e\x3d\x9e\x4a\x1c\x2c\xe6\xa9\x44\x9c\x00\x4f\x25\x7a\x2a\xd1\x53\x89\x9e\x4a\xf4\x54\xa2\xa7\x12\x3d\x95\xe8\xa9\x44\x4f\x25\x1a\x08\xf0\x54\xe2\x24\x55\xfe\x57\xa0\x12\x75\x5a\x70\x8f\x27\x7a\x3c\xd1\xe3\x89\x1e\x4f\x44\x7d\xb7\xc7\x13\x3d\x9e\xe8\xf1\x44\xd3\xc7\xe3\x89\x86\x85\x3d\x9e\xd8\x2e\xe5\xf1\x44\xab\xf2\x1e\x4f\x34\xc2\x13\xdb\x49\x63\xae\xc5\xa1\xe6\xf9\x33\xc7\xc8\xd7\x62\xc4\x38\xbf\x20\x99\xd6\xa0\x71\xeb\xb9\xd2\x24\x8f\x61\x20\x0f\x86\xc6\x19\x4b\x65\x63\x3e\xc6\x1a\x06\xce\x48\x77\x81\x75\x84\x5d\xf3\x28\xde\x05\x11\xcb\x4c\x73\x5c\xba\xf0\x89\x3e\xad\x5e\x8a\x15\x30\x4d\x47\xe3\x2b\xd4\x31\x1b\xd4\x1a\xb2\x38\x6c\xca\x8e\x37\x3d\x4e\x60\xb2\x06\x01\x2e\x73\x10\x50\x1d\xa4\xf1\xfe\xf2\x40\x1b\x1f\xe0\x22\x93\x10\x3c\xeb\x38\xa1\xd5\x6f\x92\x0c\x43\x30\x51\x96\x21\x40\x64\x1a\xa2\x5c\xc6\x30\xd9\x86\x80\x38\x86\xc1\xce\xd1\x1f\x6c\x34\x97\x0e\x1d\xfe\xc1\xcc\xe9\x1f\x29\xb1\x32\x49\xa4\x83\x8e\xff\x68\xb1\x75\x50\x40\xef\xfc\x8f\x16\xf9\x0c\xb0\x00\x18\x02\x03\x68\xa1\x05\x52\xd7\x0f\x0d\x10\x9a\x43\x39\x9d\x8f\x81\x03\x68\xc1\xca\x00\x3a\x04\x0f\xa0\x45\xb6\x60\x03\x0d\x40\x80\x16\xd9\x0f\x1c\xd4\xd6\x3c\x7c\xe7\xf7\x42\x07\x24\xed\x80\x7a\x06\xc0\x03\x2a\x27\x01\x2a\x63\x7e\x1f\x7c\x60\x29\xd9\x05\x80\x00\xd6\x10\x02\x38\x5d\x79\xad\x60\x04\xf8\x5a\x07\x0b\x6b\x2e\x01\x4c\xd9\x04\xfc\x20\x89\x47\xf8\x04\xda\xca\xcb\x86\x19\x05\xe2\x6a\x16\xdf\x8f\x72\x0a\xf8\x9d\xb3\x70\xe0\x18\x66\x15\x88\x0b\xfb\x7a\x02\x5e\x01\x26\x61\x16\x60\x12\x6e\x01\xcc\xd8\x05\xfc\xbe\xd1\xb5\x77\x34\x6d\x18\xc4\x71\x50\xe3\x1d\x3a\x0c\x03\x75\x68\xb5\x99\x87\x66\x13\x50\x36\x0c\x2d\xf7\x50\x1b\x5b\xe4\x8f\xd7\xb3\x0f\x8a\x67\x40\x0b\x75\xcf\x3f\xc0\x24\x0c\x04\x18\x72\x10\x94\x63\x88\x73\x33\x0d\xe8\x4c\x35\x1d\x1e\xc2\xea\x1c\x76\xa2\x67\x22\x68\xd5\xec\x32\x14
\x35\xbb\x0d\x7e\xd9\x1b\xe4\x28\x0a\x36\x02\x2d\xd6\x3d\x4b\x01\xd3\xf0\x14\xf0\x1c\x4c\x05\x4c\xc8\x55\xc0\x84\x6c\x85\x5e\xb6\x43\xbe\x02\x1c\x31\x16\x40\xb0\x38\x82\x05\x6b\x01\x16\xbc\x05\xd0\x99\x0b\xa0\x73\x17\x40\x67\x2f\xc0\x8a\xbf\x00\x1b\x06\xa3\x5b\x18\x6b\x31\xd3\x48\xc0\x1a\xed\xc0\x05\x8f\x01\x2e\x98\x8c\x5e\x21\x18\x1b\x5a\x9f\x10\x94\x21\xaf\x4f\x08\xca\x9a\x07\xce\x18\x0d\x70\xc3\x69\x80\x0d\xab\x01\x64\x5e\x03\xdc\x31\x1b\x30\x1d\xb7\x01\x93\xb1\x1b\x60\xc6\x6f\x50\xd4\x35\xa3\x0c\x07\xfe\x60\xaf\x98\x8f\x71\x8e\x03\x7f\x0d\x29\xb9\x8f\x7e\x96\x83\x7a\x63\x92\x6a\xe7\x1e\x9e\x03\x2d\x53\xc7\x7f\x34\x98\x0e\xc2\x15\xa4\x87\x01\x29\xb9\x0e\x7c\x6b\x4e\xca\x81\x80\x09\x0b\x42\xbe\x34\x0e\xf1\x20\xf8\x13\x7e\x8b\x1f\xd1\x30\x21\x84\x99\xa5\x61\x48\xc6\xb9\x10\xf4\x7b\x1a\x78\x86\x96\x0d\xc1\x37\x47\x83\x25\x19\xe2\x43\x28\x03\xa2\x59\xe1\x36\x23\x42\x9d\xbd\xcd\x89\xdb\xe4\x44\xa8\x4a\xec\xee\xd2\x48\x24\x6c\xa0\xa3\xa1\xb4\x65\x4b\x60\x32\xbe\x04\x5c\x30\x26\xe0\x82\x33\x01\x07\xac\x09\xd0\x78\x13\xb0\xb3\x1f\xd8\x72\x27\x60\xc9\x9e\x80\x53\xf3\x87\x05\x83\x02\x5f\xcb\xf8\x61\x89\xa3\x80\x19\x92\x82\x1e\x16\x5b\x15\xca\xba\x1f\x4b\xc1\x2f\x7f\x1d\x8b\x47\x2f\x9a\x82\x96\xad\x50\x96\x51\x3c\x05\x2d\x57\xe2\x2c\xd3\x21\x2a\x30\x0d\xa6\x02\x86\xa8\x0a\xed\xac\xe8\x1e\x57\x01\x23\x64\x85\x66\x62\x92\x78\x46\x2f\xb6\xf2\x2f\x62\x02\x22\xe3\x20\x30\x16\xbe\x6a\x46\xc2\x80\x20\x0f\x61\xa5\x90\x97\x5e\x8c\xc5\x89\xd9\xaa\x8b\xb2\x50\x0f\x5e\x95\xc1\x46\x83\xb3\x50\x46\x42\xaf\x7d\xa5\x66\x2f\x71\x7b\x56\xb2\x75\x77\xe8\x8d\x35\x45\x31\x8a\x4c\x81\xc2\xc0\x64\x38\x0c\xb8\x45\x62\x80\xac\xa4\x26\xa1\x31\x60\x81\xc7\x00\x1d\x91\x01\x6b\x4c\x06\x6c\x51\x19\x20\xe3\x32\x40\x46\x66\x80\x8e\xcd\x80\x05\x3a\x03\x64\x7c\x06\x6c\x10\x1a\xb0\xc5\x68\xba\x02\x68\x5a\x79\x3b\x9c\x06\x1c\x20\x35\x7d\x32\xf0\x8a\x70\x3b\xb4\xa6\x47\x06\x41\x21\x6f\x8f\xd8\x68\xea\x82\xc9\x02\x06\xe8\x4c\x60\xe0\x2c\x26\x01\x2a\x51\x13\x7c\x5b\x59\xc1\x60\xaa\xcc\x60\x60\x97\x1d\x2c\x3a\xec
\xc4\x61\xfa\x39\x39\x9a\xcb\xe2\x95\xd8\xe2\xd3\xdc\xe3\xb1\xd5\xe9\x98\x70\xca\x26\xf4\x04\xcd\xbf\x2b\x41\x53\x8e\x11\xcf\xcf\x78\x7e\x66\xf4\xf1\xfc\x8c\xe7\x67\x3c\x3f\xe3\xf9\x19\xcf\xcf\x78\x7e\x06\x7d\xac\xf0\xf4\x8c\xa7\x67\x3c\x3d\xe3\xe9\x19\x4f\xcf\x78\x7a\xc6\xd3\x33\x9e\x9e\xf1\xf4\x8c\xa7\x67\x3c\x3d\x63\x5a\xd4\xd3\x33\x9e\x9e\xf1\xf4\x8c\xfe\xf1\xf4\xcc\xc0\xe3\xe9\x19\x4f\xcf\x78\x7a\xa6\xa8\xb4\xa7\x67\x3c\x3d\x53\x7f\x3c\x3d\xe3\xe9\x19\xbc\x10\x4f\xcf\x78\x7a\x06\x63\xfa\xf0\xec\x8c\x67\x67\x3c\x3b\xe3\xd9\x19\xcf\xce\x78\x76\x06\x3c\x3b\xe3\xd9\x99\xd1\xa2\x9e\x9d\x21\x94\xf4\xec\x8c\x51\x61\xcf\xce\x78\x76\x66\xbc\x2e\x9e\x9d\xf9\xcf\x65\x67\xf6\xc1\xea\x73\xfb\x72\xfb\x7c\x18\xcd\x95\xe6\xed\x58\x49\xd3\xdc\xed\x2d\x6a\xd6\x1c\x75\x2c\xfa\x2c\xc6\xd8\x1d\x93\x2e\x87\x9d\xd4\x42\xa6\x87\x14\x0a\x41\xb0\x0e\x12\xbe\xa2\x11\x30\xb6\x53\xf5\xb4\x78\x35\x45\xc4\xf3\xf5\x68\x59\x4d\x9b\x8c\xbb\x2f\xde\x2b\xe5\xca\x21\xe5\x39\x0c\xa3\xba\x1c\x69\xbe\x4a\x79\xc8\x57\x59\x27\xa3\x3e\xec\x59\x2a\xfe\x94\xc4\x87\xcd\x16\x7d\x0d\xcb\x07\x59\xc7\x65\x08\x4e\xcf\xae\xe7\x6a\x5f\xfe\x70\x79\x73\x35\x7f\x7b\xf6\xee\x6c\x7e\x8a\x5b\x75\x96\x8b\xab\x23\x78\xb3\x58\x2e\x17\x17\x18\x6f\x10\x7c\xe6\x46\x6d\x5d\x51\x12\x96\x8b\x2b\xd4\xef\xd5\x57\x19\x17\x89\x0e\xbb\x6a\x64\xe1\x27\x5b\x10\x65\x7c\x83\xda\xd8\xc4\x3d\x95\x65\xb2\xe4\x9f\xff\x44\x9f\xa5\x97\xf5\x7a\x53\x87\xbf\x18\xfd\x3b\x16\x3d\x35\x47\xae\x54\xac\xe0\x73\x4e\xb7\x07\x3b\x72\x95\x84\x62\xf2\x5d\xf0\x6c\x8b\xe3\xa5\xdc\xac\x7b\xd7\xf5\xd7\x7f\xdb\x6b\x5f\xa3\xaa\x56\xeb\xdf\x6d\xde\xe8\x9f\x76\x52\xd6\x6d\xcd\xf6\x8a\xed\x7f\xa9\x92\x6c\xea\x94\x95\x36\x8d\x47\x59\xf8\xa4\xbc\x23\x62\xb4\xa1\x35\xdb\x16\x06\xf5\xc7\x6d\xb0\xda\x96\x54\x53\x0d\x03\xd8\xb3\x04\x2f\xb3\x35\xe2\x73\x2a\x88\x6a\x58\xeb\xac\xd1\x17\xf3\xe5\xfb\xc5\x69\x63\x81\x2e\xfe\x4e\x7a\x6b\xa2\xa4\x17\x05\x67\xbf\x54\x42\xce\x2e\xcb\x3f\x4b\x4f\xcd\xfc\xcf\xe7\xb3\xe5\xfc\x66\x39\xed\x9a\xde\xfd\x36\x4a\x71\x74\x02\xd9\xaa\x15\x48
\xc5\x90\xb9\x71\xab\xb6\xa5\x14\x53\xdd\x60\x54\x52\x01\x6e\x2c\x79\x9a\xe1\xb1\x67\xfb\x53\xfc\x8d\xe6\xed\xdf\xc6\x29\xde\xa2\x66\x4d\x2b\xdc\x7e\x1f\x3e\x01\xcb\xdb\xb9\xee\x47\x04\xec\x1e\x73\x27\x2d\xd1\x7a\xb1\x46\x8a\x65\xe7\x90\x99\x2b\x2a\x49\x1c\x3c\x9d\x20\x76\xb3\x23\xd2\xc9\x61\x03\x6a\xb8\xa2\x80\x71\xf7\xe5\x01\x62\xb8\x41\x00\x63\xb7\xaf\x2e\x2d\xfc\x1c\xf4\xaf\x09\xf9\x2b\xf7\x3c\x94\xd4\x41\xea\xb7\xa0\x37\x51\x12\xc7\x88\x5f\xb5\xf1\xa1\x44\x0e\xd1\xbe\x6d\x7a\x17\x37\x44\x3a\xa4\xaf\x2b\x50\xb5\xe9\xe1\xd2\x43\xed\xa2\x24\x56\x64\xc8\x00\xb1\x8b\xbb\x8b\x56\xa4\xcd\x00\xad\x8b\x1b\xa4\x5d\x38\xc8\xd2\xd7\xc7\x8e\xd2\x75\xb3\xba\x59\xd1\xb9\x5f\x61\xf3\xb3\x26\x73\x8d\xa8\x5c\xac\xea\x63\x98\xc8\xcd\xfd\xd7\x70\x03\x78\x88\xc6\x2d\xe9\x5a\x94\xc8\x31\x12\x97\x82\xe2\x8d\x51\xb8\x85\x5d\x1b\x25\xd4\x3d\x81\xeb\x9e\xbe\x75\xef\x78\x61\x40\xdd\xd2\x1d\x2f\x06\x9d\x2e\x4a\x82\x16\xbd\x19\xd6\x69\xdb\x3e\x7a\x16\x25\xb4\xe3\xb6\xa1\x25\x67\x91\x87\x8b\xee\xf8\xd1\x52\xb3\x48\xbd\x64\xd2\x74\xdc\x70\x44\xcc\xba\xa7\x65\x4d\x48\x59\x8b\xed\x75\xd8\x8f\x03\xbb\x8a\xf6\x13\xb2\x4d\xe2\x15\x25\x57\x4b\xc7\xf6\xd1\xae\xb8\x69\x35\xe2\xc0\x51\xaa\x44\x08\xeb\xdf\xeb\x09\x28\xd7\x09\x08\xd7\xc9\xe9\xd6\xa9\xc8\xd6\xa9\xa8\xd6\x49\x89\x56\x17\x34\x2b\x5e\xd7\x45\xa3\x58\xa9\x04\x2b\x91\x5e\x25\x92\xab\x44\x6a\x95\x4e\xac\x92\x69\x55\x3b\x52\xd5\x92\x52\xb5\x26\x54\xad\xe9\x54\x6b\x32\xd5\x9a\x4a\xb5\x26\x52\xdd\xd0\xa8\x0e\x48\x54\x32\x85\x4a\x23\x50\x1d\xd1\xa7\x13\x91\xa7\xd3\x50\xa7\x06\xc4\x29\xfa\x28\x3b\x46\x9b\x16\xf4\x28\x4a\xe8\x28\x69\x5a\x23\x47\x51\x82\x7b\x29\xd3\xba\x77\x00\x4a\x62\x0f\x61\xaa\x25\x46\x91\xa7\xf9\x8a\x2e\xed\xa7\x45\x91\x2a\x5a\x45\x96\x4e\x4c\x8a\x8e\x52\xa2\x24\x4b\xde\x10\x21\xda\x21\x3e\x91\xb7\x91\x36\x1d\xaa\xa7\x3d\x71\x57\xd1\x31\x32\x54\x7c\x0a\xed\x82\xa3\xa5\x42\x6d\x78\xc6\x01\x22\xb4\x43\x78\xe2\x04\xb7\x68\x50\x2d\xdd\x49\xef\xab\xa3\x5e\xb2\x13\x57\xcb\x8a\x8d\x75\x4b\x75\x4e\x43\x74\x5a\xd3\x9c\xd6\x24\xa7\x2d\xc5
\x49\x20\x38\xc9\xf8\xa3\x1d\xb9\x69\x43\x6d\xba\x51\x86\x5b\xd0\x9a\x5f\x41\x15\x6e\x49\x6a\x1a\x50\x9a\xe8\xb0\x18\x83\x84\x66\x5d\xa7\x8d\x5b\x8c\x87\xe9\xcc\x9c\xb6\xc4\x6d\xf8\x23\x64\xa6\x22\x2d\xd1\x3a\xe6\x49\xa8\xcc\x09\x88\x4c\x13\x1a\x53\x1e\x26\x70\xca\x5b\xe7\x24\xe6\x38\x85\x59\xf5\x19\x4e\x71\xd7\x47\x60\xd6\x15\xfb\x84\x41\xaa\x31\x02\x34\x15\xfb\x16\xda\xf0\x36\x79\x99\x37\x09\xee\xbb\x87\xa9\xcb\x1a\x45\x89\xf6\x25\xd3\x11\x97\xd6\x06\x8b\x61\xb5\x3d\xcd\xd2\xde\x22\x2d\xf5\x9a\x76\xca\x02\xe5\x3c\x26\xe5\x14\xf1\x28\x3b\xd8\xa2\x43\x5a\x72\x1a\x52\xd2\x21\x25\x89\x57\x7d\x92\xe8\x48\x2a\x19\x49\xa4\x22\xed\x88\x48\x2b\x1a\x92\x46\x42\xd2\x28\x48\x22\x01\x49\xa5\x1f\x69\xe4\x23\x99\x7a\xb4\x22\x1e\xed\x68\x47\x5b\xd2\xd1\x96\x72\xb4\x25\x1c\x6d\xe9\x46\x5b\xb2\xd1\x05\xd5\x48\x24\x1a\xb3\xe6\xdd\xe1\x9c\x45\x9b\x03\xdb\x70\x93\x35\x0e\x79\x65\x6a\x5d\x95\x96\xfa\x17\x1b\x08\x6a\xde\x3b\x14\xea\x57\x7a\xcf\xdc\xf3\xac\xe9\x3f\x3f\xbe\xba\x1f\xa2\x20\x5b\x3c\xf0\x24\x09\xd6\xcf\xf0\xdd\x1f\x6a\x6f\xc3\x7e\xac\x38\x1a\x8a\xda\x8a\xc3\x8d\x3c\x6b\xe7\x7a\x2b\x75\x18\x91\x9f\x6e\xa8\xf3\x69\x1c\x8b\xa4\xcf\x5c\xa4\xd4\x0b\x79\x0e\x0e\xf9\x16\x09\x09\xc4\xe6\x75\x15\xfb\xf7\x93\x2a\x2a\x5d\x48\xd9\x4a\x62\x05\x91\x64\xfd\x54\xe5\x72\x5f\x47\xf1\x21\x8a\xe0\x31\x12\x1b\xa4\x95\xd2\x96\xa9\x3f\xff\x76\x2b\xde\x73\x6b\x8e\xac\x26\xfc\xfe\xd3\xc3\x0f\x2f\x13\x9e\x66\x2f\x1f\x7e\x78\x59\xc0\xa2\x27\xea\xd0\x79\x9a\x37\x73\x6c\x4a\xb4\xe6\x2a\x9d\x08\x6e\x2f\x5a\x02\x06\x7c\x0b\x32\xfe\x25\x1b\x1a\x61\x86\x8e\xe8\xed\x99\xc4\xbf\x18\xfd\xda\x58\xb1\x30\x22\xb0\x35\xff\x12\xf6\x98\x4f\x06\x71\x66\xdf\xb1\xe4\xf3\x3a\x7e\x8c\x60\x1d\xa4\xfb\x90\x29\x73\x00\xff\x92\x1d\xd8\xb0\xef\xad\x18\xc9\x23\x91\x92\xd4\x87\xac\xe2\xe8\x3e\x0c\x56\xd9\xa0\x4a\xea\x18\xbe\x3c\xbd\xdd\xb2\x64\xe8\x33\x8e\x21\x2d\x14\x28\x83\xbf\xba\x0b\x59\xf4\x79\xf0\x17\x61\xbc\x49\xaf\x58\xc4\x87\xbe\xd0\xd4\x73\x3d\x6f\x87\xb1\x75\x08\xb1\x06\xb5\xfd\x35\xd5\x0b\x46\x0a\x75\x96\x1d\xd1\x87
\x45\xe5\x72\x85\x42\xde\xc3\x63\x07\xe8\x9c\xd4\x9b\xec\x83\xde\x99\xac\x23\xb4\x49\xa0\x44\x1b\xa8\xc5\x34\x34\x6c\xbb\xcd\x82\x34\x6f\x89\x0c\xe5\xd6\x53\xb8\xf2\xbc\x5b\x5c\x5f\xcc\x96\x2d\x12\x6a\x76\xfd\xf3\xe9\xe2\xe3\xe5\x11\x5c\xcf\x3e\x8e\x39\x70\x98\xdc\x5c\x8e\x35\xaf\x19\x2d\x52\xd4\x62\xf4\x87\xd7\xb3\x8f\xfd\x2b\x63\x90\x85\x83\x9b\xaf\xe1\xf8\xe8\x1c\x32\xb2\xc1\x36\x6e\xf4\xdb\x42\xfe\x97\x85\x6a\x7b\x92\x35\x2a\xb4\x08\x8f\x72\x4c\xf4\x8f\xf3\x7c\xb5\x71\xbf\xb6\xff\x32\xba\x8c\xe1\x47\xf6\xb8\xcc\xd6\x0a\xbf\x12\x3f\x6f\x99\x03\x47\x13\x00\xa2\x56\x6b\xb3\x95\x38\x1b\xde\x97\x5c\x2d\xd5\x26\xb9\x3c\x8f\xe5\xf7\xdf\x0c\x9b\xca\x8d\xd7\x7c\xd1\xbe\x6a\xf4\x8d\x1a\x5a\x10\xac\x5a\x7b\xe1\xaf\xbd\xc5\xf9\x6a\x99\x8f\x29\xc4\x3b\x1a\x63\xec\x54\x6d\x25\x10\xab\xa2\x52\x7b\x2c\xe6\x9d\x6c\x99\xb1\x13\x3c\x06\x0a\xdb\xc5\xcf\x71\xc6\xbf\x88\x8d\xce\xcb\x4e\x9a\x59\xbc\xcb\xd0\x70\xd3\x25\x17\xd4\xcc\x16\x8d\xa2\x09\x90\x70\xb1\x38\x9d\xe3\x63\x23\xbc\x5d\x9c\x2f\xae\x8f\xe0\x97\x4f\xd7\xb3\x5f\x8f\xe0\x66\x39\x5b\xde\x98\xf8\x16\x9a\x2a\xd6\x8e\x3b\xd5\x32\x2a\x24\x6b\x65\xf4\x4b\x59\x71\xa3\x5f\xca\x6f\x1b\xfc\x65\xb1\x46\x98\x4d\x6a\x13\xa7\xa8\x76\x90\x91\xf1\x45\x08\x86\xa3\x00\xc9\x6b\x64\x79\x92\x53\xc8\x44\x90\x9a\x4d\x3c\x92\x69\x9a\x62\x8e\x36\x34\x41\xa3\x38\x5e\xf2\xe4\x33\x6c\x73\x4c\x4e\xe8\xe3\xb6\x1a\x66\xb4\x04\x0e\x84\x0d\xf9\x86\x47\xeb\x25\xdf\xed\x43\x96\x19\xad\x7f\x04\x83\x78\x6b\x68\x9e\x37\xde\x69\x24\xa1\xb9\x3e\xcd\xa4\xa9\x56\x14\x2e\xae\x97\x62\x57\x88\xd8\x4e\xfc\xf1\xb6\x52\x1f\x99\x5a\x18\x3a\xb4\x92\x1c\xfa\xb9\x2b\x55\x90\x42\xba\x8d\x0f\xe1\x5a\x1a\x78\x0c\x25\xe6\xd5\x92\x26\x4a\x49\xd2\xee\xe3\x30\xb7\x09\xe6\xe7\x46\x69\x13\xba\xfd\xff\xfe\x11\xb2\x3b\x1e\x7e\x12\x0d\xf3\xcf\x5b\x53\x43\x6b\x2d\x50\x43\xc2\xd3\x38\x7c\xe0\x05\x2f\x2a\xa5\xbd\x78\x91\xaa\x95\xfa\x04\x4c\xd6\xd7\x5d\x10\xcd\x28\x20\xb4\xed\x30\xb8\xe8\xbc\x17\x3f\x14\x9a\x07\xf3\x30\x7e\xe4\x09\xdc\xc5\x07\x65\x12\x45\x98\xf1\x72\xcb\xb4\x98\x97\x3c\x5a
\x3d\xe5\xa7\x8c\x20\x2d\x47\xc2\x91\xf4\x1f\xe3\xa2\xae\xc6\xbe\x53\x77\x4f\xb9\x59\xaa\xf4\x7c\xdc\x05\x51\xb0\x3b\xec\x6a\xe1\x7a\x95\x1d\xcb\x9c\x09\x3c\xa4\x5c\xd9\xd5\xeb\x07\x6e\xa5\xe7\x7c\x17\x27\xc0\xbf\x30\x51\xcd\x23\x08\x30\xf6\xdb\x22\xcb\xf8\xfe\x70\x17\x06\xe9\x96\x8b\xd6\x5b\x71\xe0\x0f\x42\xec\xf7\xaf\x44\xb5\x0f\x19\x97\xf8\xb7\xa9\xc8\xdb\x5d\x10\x7d\xd2\x20\xe9\xd5\x4c\x2a\xa0\xe4\xef\x4d\xd5\xe9\x79\x2d\x64\x8e\x89\x47\x29\x26\x8a\x33\xd8\xb1\xcf\xa2\x1d\xa2\x94\x57\x3a\x5e\x16\x99\x76\x91\xac\xa1\xfa\x7e\x96\x49\x1f\x4a\xf5\x96\x12\x7f\x37\xf2\x14\xdb\x87\x71\xb6\x14\xb3\xe1\x59\xe6\xcd\x55\xfe\x36\x5c\x59\xea\x5e\x56\xbc\xcd\xd8\x03\x48\xa7\xf0\x28\x27\x52\xd9\xfd\xa2\xc9\xcc\x67\x52\x0e\x2e\xaa\xa3\x47\xf7\x38\x7a\x75\xbe\x58\x7e\x5a\xfe\x7a\x45\x38\x93\x02\x9c\x9f\x5d\xce\xe5\x69\xf4\xed\xcf\xf3\xd3\x4f\xb3\xeb\xf9\xac\xfa\xbf\x37\xb3\xeb\x23\x78\x3f\x9f\x2d\x2f\x66\x57\x66\x18\x8c\xb9\x15\xf8\x58\x5f\x6d\xc3\xb2\xa2\xd6\x86\x3f\xad\x7f\x1a\xb2\xc8\x9b\x99\x99\xc1\xf6\xb8\x68\x24\x93\xfb\x53\xf3\x3c\x83\x99\x33\x88\x38\x2c\xc3\x26\xa5\xe7\x99\x3a\x94\x97\xf6\x9c\xc7\x73\x57\x63\xb1\x2d\xc9\xc5\x5e\x6e\x28\xe6\xae\x24\x75\x1c\xf5\x3e\x89\x77\x72\x36\xdd\x64\x6c\xf5\x79\x9d\x04\x0f\x3c\xc9\x23\x6d\xa6\x30\xbb\x3a\x33\x0c\x8d\x89\x0e\xb7\x92\x11\x03\x69\x92\x82\xf0\x0c\x0c\x00\x64\xa0\x54\xd7\xa3\x80\x5c\x91\xc6\xc8\x50\x65\x61\xcf\x12\xb6\xe3\x19\x4f\xd2\xa6\x81\x13\x61\x1c\xc6\xfa\x43\x9b\x5f\x5d\x8a\xe7\x38\x8f\x53\x67\x5c\x80\x12\xca\x07\xea\x54\x09\xc5\x25\x19\x39\xbc\x40\x13\xcc\x87\x98\x40\x6d\xea\x11\x46\xad\x57\x63\xc0\xbd\x79\x82\x35\xbf\x67\x87\x30\x3b\xca\xe3\x34\x3d\x52\xf2\x42\xd5\x17\xa2\x06\xcc\xfc\x41\xc6\x07\x2d\x1c\xe2\x91\x52\x6b\xa1\x31\x76\xe2\x06\xb7\x6f\xe5\x5e\xc1\xe7\xff\xa8\x52\x8e\x3d\x04\xfc\xb1\xbc\xb4\x8d\x69\xb6\xbb\x0f\x6d\x2c\x83\x5d\x70\x2a\xf5\xb8\x49\x94\x64\x1b\xa8\x4a\x3d\x06\xe1\xaa\xd0\x32\x2b\x92\x26\x1d\x0c\x5a\x45\x10\x5c\x0f\x73\xa5\x09\x5d\x45\x90\xf8\x3c\xc1\xae\x8a\x77\x0d\x86\xbc\x22\x48\x2c\x83
\x64\x0d\x07\xbe\x22\x48\x2e\x42\x27\x8d\x85\xbf\x22\x88\x56\x0c\xdf\x50\x10\x2c\x82\xd0\x56\xd8\x2c\x4d\x28\x2c\x82\xd0\xfe\xe0\x59\x72\x4d\x24\x48\x74\x1a\x42\x4b\x3d\xe3\x81\xb4\x28\x43\x35\x9d\x22\x9c\x96\x4e\x76\x37\xa8\x16\x45\x24\x2d\xe5\x9e\x5d\x30\x2e\xf5\xb8\x5c\xcd\xad\x02\x73\xb5\x24\x4e\x7f\x68\xb1\x0e\xcb\xa5\x1e\xa3\xe0\x5c\x94\xbd\x22\x1e\x09\xd1\x45\x5d\xc1\x59\x7f\xa0\x2e\xd2\x96\x56\x85\xf6\xd2\x84\xeb\x22\x4a\x6c\x07\xf8\x6a\x06\xed\x22\x08\xed\x0f\xf3\x95\x6b\x7b\x68\x7b\xef\x40\xb0\x2f\x52\xbe\x35\x70\x1c\xf2\x4b\x3d\xd3\x64\x5c\x1b\x0d\xff\x45\xec\x7b\x5d\x06\xb3\x82\x01\xa2\x88\xd4\x86\x0d\xab\x42\x81\x51\x0e\x17\x83\xc1\xc3\x9a\x0d\x41\x19\xab\x3d\x21\xc4\x6a\x23\x8d\x72\xa8\x1d\x0c\x24\xa6\x82\x83\x11\xc4\xba\x0f\x27\xa6\x1e\xf7\x41\xc5\xd4\xe3\x1c\x78\xca\xab\x3b\x01\xf6\xa4\x9e\x0e\x89\xd4\x0c\x33\x46\xeb\xb2\x46\x60\x32\x7d\xb0\x31\x6a\x55\xbb\xe1\xc9\x6a\x34\x14\x65\xb2\x0d\x06\x29\x2b\x74\xf5\x04\xc1\xee\x43\x95\x35\xe4\xba\x0c\x58\xd6\x10\xdc\x1b\xb6\xcc\x52\x64\x1e\xe8\x4c\x1f\xbc\x8c\x2e\xbb\x19\xee\x4c\x1f\xc2\xcc\x89\xf4\x1f\x5f\xe9\xa4\xbf\xb2\x91\xae\x09\x7d\xe6\x24\x9c\x99\x7a\x28\xb9\x0f\xe9\xa1\xcd\x5a\xa5\x09\x39\xf9\xc8\x61\xce\xda\x85\xd1\x89\x04\xc9\x21\xcf\xda\x85\x29\x79\xfd\x2c\xc2\x9f\xe9\x8a\x53\x72\xf2\x59\x87\x42\x6b\x09\xb1\xc9\x0f\xe8\x20\x2c\xda\x80\x18\x6c\x76\x3e\x07\x21\xd2\xfa\xc5\xa0\x73\x05\xba\x0a\x97\xd6\x92\x65\x9b\x7a\x90\x1c\x3a\x4d\x3d\xb4\x00\x6a\xea\x71\x14\x46\x4d\x3d\xa3\xc1\xd4\x08\x2b\x7d\x11\x7e\x6d\x28\xa4\x9a\xe5\x79\xf0\x44\x1f\x58\x8d\x74\xe1\x6a\x85\x62\xb3\x8a\x6b\xa4\x9e\xbe\x80\x6c\x65\x90\x35\xca\x99\xcd\x71\x58\x36\xf5\x8c\x05\x67\xa3\x2a\xe4\x87\x42\xb4\xd1\x54\x0d\xf5\xa0\x6e\x7d\x81\xda\xec\xae\xb2\x4a\x21\xd2\x09\xd7\x46\xa9\xae\xfb\x00\x6f\xea\x19\x0d\xf3\x66\x71\x89\x1d\x0a\xf6\x46\x19\xaf\xad\xf0\x70\xed\x90\x6f\x94\x9a\xf6\x04\x89\x2b\x47\x2e\x45\x9f\x3f\x10\x2a\x8e\x18\x81\x4d\x3d\xae\x03\xc6\xa9\x67\x3c\x6c\x9c\x85\x6d\x6e\xdd\x17\x3c\x8e\x20\xb3\xb9\x00\x68\x42\xc8\x11\x35
\x59\xed\xd8\x97\xad\x40\x72\x24\xcd\x53\x7f\xe8\x39\x38\xc3\xe5\x56\x50\xcf\x04\x01\xe8\xd4\x63\x1d\x86\x4e\x3d\xd6\xc1\xe8\xea\x62\xe8\x21\xe9\xd4\x43\x08\x4c\xa7\x1e\x2b\x2b\x8b\x6d\x90\x3a\xf5\xd8\x84\xaa\x53\x8f\x4b\x53\x91\x45\xd8\xba\x96\xbc\xe9\x0d\x45\x96\x41\xeb\xd4\x63\x10\xba\x8e\xb2\x72\xa9\x2c\xb8\x7d\x01\xec\x6c\x56\x98\x4e\x32\x98\x22\x8c\x1d\xd5\x1a\x5f\x0f\x7c\xd7\x0d\x66\x47\x92\xda\x0d\x7f\xd7\x08\x69\x47\x39\x0d\x3b\x0c\x82\xa7\x9e\x09\x42\xe1\xa9\x67\x2c\x20\x1e\xc5\xbc\x5f\x04\x26\xee\x09\x8b\x47\x10\xd9\x08\xa4\xd7\x0d\x8e\x47\xe9\xa2\xde\x70\x7a\x35\xa3\x19\x45\x7b\xdd\x1b\x54\x8f\x14\xdc\x55\x3d\x2e\x43\xeb\xa9\x67\x34\xc0\x1e\xf1\xb0\x32\x12\x66\x8f\x20\xb5\x0a\xcc\xa7\x0d\xb6\xe7\xd0\x3c\xd8\x0d\xb9\x47\xbe\x5b\xd4\x4c\x62\x9a\xc0\x7b\xb4\xf9\xdf\x6b\xbf\xaa\x59\xa3\x68\x3e\x43\x06\x21\xe0\x69\x67\xb7\xa1\x04\x39\x70\x47\x6a\xde\x09\x82\xf7\xa9\x67\x9a\x10\x7e\xea\x71\x18\xc8\x4f\x3d\x54\xa5\x3f\x29\xa8\x5f\xb3\x30\x36\xb4\x5f\xb3\x34\x32\xc0\x5f\xb3\x30\x2d\xcc\x5f\x53\x06\x29\xd8\x5f\x53\x04\xcd\x6c\x41\x09\xfc\xd7\x2a\x4b\x33\xb6\xd0\x82\x00\x36\x4b\xd3\xec\x25\xc4\x80\x80\xcd\xe2\x56\xe6\x0e\x9b\xe0\x80\x4d\x19\xb6\xc6\x0e\xbb\x40\x81\xfd\x52\x28\xc6\x05\xbb\xa0\x81\xbd\x52\x48\x86\x0e\xfb\x00\x82\xda\xfa\x98\x86\x11\xac\x9e\x7b\x24\xf7\x03\x4e\xe2\xc5\xe7\xa1\xa0\x70\x08\x0a\x0c\x87\x46\xf8\xad\x72\xd7\x46\x36\xa3\x6a\x03\xf3\xd8\x74\x0f\x3f\xbc\x54\x45\x0c\x83\xcf\x55\x8f\xf2\x61\x5f\xf3\x28\x53\x2e\xf2\x92\x3f\x96\x27\xbb\x22\x3f\x46\xa1\x40\x43\xeb\x4d\xc5\x46\x5e\x44\xc9\x13\xe7\x02\x89\xa4\x61\xb6\xd8\x7d\xb0\xfa\xdc\xbe\xc8\x7f\x1d\x68\xe7\x4a\x53\x13\x9a\xbc\x89\xf4\x1b\xd6\x15\x6c\x0e\x63\x16\x7d\x16\x37\xd1\x3b\x26\xfd\x55\xc9\xda\x58\x35\x26\x9f\x0b\x85\x59\x07\x09\x5f\xe1\xc1\x2e\x70\xac\x0b\x3b\x2d\xaa\xf1\xcd\x29\xc1\x74\x83\xa4\xac\xad\x03\x6f\xe9\xf7\x4a\x7d\x75\x48\x79\x0e\x7f\xc9\x51\x44\xbb\x64\xa5\x3c\xe4\xab\xac\x93\xac\x09\xf6\x2c\xa5\x59\x06\x93\xf8\xb0\xd9\xaa\x98\x1a\x6a\x54\xba\x70\x3a\x3b\x3d\xbb\x9e\xab\xd3
\x48\x23\xae\xde\x72\x71\x75\x04\x6f\x16\xcb\xe5\xe2\xe2\xf9\xfc\x88\xb4\x75\x21\xc8\x59\x2e\x4c\xf8\xec\x76\x29\xf5\xb5\xc8\x82\xd1\x61\x57\x8d\x47\xea\xac\x0d\xa2\x8c\x6f\x08\x77\xf9\x3c\xac\xa4\x28\xff\xe7\x3f\xd9\x4e\xfa\xcb\xfa\x97\xd8\x4d\x23\x31\x8b\x76\x2c\x7a\xb2\xb5\x85\x4b\x85\x97\x9a\x91\x62\xd2\xe8\xa6\x00\x52\x6c\x3e\xa1\x2f\x78\xb6\xfd\xda\xa4\xe1\x75\xbd\x2a\xff\x12\x0b\x6d\xa3\xc6\x0e\x16\xdb\xdb\xbc\x37\x3e\xed\xa4\x44\x74\x02\x65\xf5\xb4\x4d\xfa\x4d\x33\x81\x52\x6e\xf2\x88\xe6\x8a\x20\xb3\x21\xc6\x32\x61\x5b\x85\x95\x54\x41\x89\x08\x12\x9b\x04\xcd\x9e\xe5\x68\xa1\xf5\x44\xc9\xb1\xbc\xba\xda\xd1\xc1\xe6\x70\x31\x5f\xbe\x5f\x9c\xb6\x22\xae\xaa\xbf\x93\x8e\xc9\x74\x91\xd2\x19\xb9\xf8\xf3\xd9\x65\xf9\xe7\x9b\x0f\x17\x16\x52\xcf\x67\xcb\xf9\xcd\xf2\xf9\x36\xac\x6e\xfb\xd0\x85\x10\x55\x32\x55\x7b\x5a\x14\x26\x69\xa1\xaa\x1e\xa3\x17\x56\x1d\x86\x28\x9f\x6a\x72\x78\x7d\x9d\x0b\x95\x2e\x9b\xd8\x37\x75\xa1\xb2\xae\x60\xd3\x42\xbc\xdf\x87\x4f\xc0\xf2\x0e\xa8\xfb\xd0\x61\xef\xd4\xf7\x19\x4f\xe0\xb6\x26\xe0\xb6\xe6\x99\xe5\x83\x0e\xf8\xa0\x03\x78\x89\x3e\xe8\x80\xfe\xf1\x41\x07\x7c\xd0\x01\x1f\x74\xc0\xe0\xf9\x0f\x0b\x3a\xa0\x3b\x1a\xf8\xe8\x03\x3e\xfa\x80\x8f\x3e\xe0\xa3\x0f\xe0\x45\xfa\xe8\x03\xd5\xe3\xa3\x0f\xa8\xea\xfa\xe8\x03\x3e\xfa\x40\x43\xae\x8f\x3e\xe0\xa3\x0f\x0c\x3f\x3e\xfa\x00\xa1\xb0\x8f\x3e\xe0\xa3\x0f\x8c\x8a\xf1\xd1\x07\x1a\x8f\x8f\x3e\xd0\xff\xf8\xe8\x03\x3e\xfa\x80\x8f\x3e\xe0\xa3\x0f\xf8\xe8\x03\x3e\xfa\x80\x8f\x3e\xe0\xa3\x0f\x0c\x3e\x3e\xfa\xc0\x7f\x4c\xf4\x01\x9d\xc5\xc8\x87\x21\xf0\x61\x08\x7c\x18\x02\x1f\x86\xc0\x87\x21\xc0\x3d\x3e\x0c\x01\xf8\x30\x04\xe5\xe3\xc3\x10\xc8\xc7\x87\x21\xa0\xca\xf0\x61\x08\x28\xa5\x7d\x18\x02\x1f\x86\x60\x54\xca\xbf\x6e\x18\x82\x76\x12\xd2\x6b\x71\xfc\xfb\xba\x99\x48\x65\x15\xf0\xc2\x26\xba\xd0\xe2\x6a\xd3\xb8\xa5\x5e\x69\x92\x91\x32\x90\x07\x6c\xb8\xe3\xd9\x23\x47\x69\xc8\xb2\xc7\xb8\x99\xc3\x51\x45\x41\x30\xdf\x41\x69\x10\xc4\x9a\x47\xf1\x2e\x88\x58\x16\x7f\xa5\x80\x04\xa7\x55\x05\x68\x62\xa6\x1c\x18\xd4
\xca\x75\x4c\x72\xb5\x66\x2e\x0e\xe5\x72\xa0\xe0\x0e\x48\xf8\x5c\xb4\x40\xc9\x47\x0b\x76\x48\x0d\x95\xc8\x02\x9b\x51\x05\xee\xf2\xd3\xc2\x57\x18\x5d\x36\x75\x9d\x22\x6f\x2d\x98\xe4\xae\xa5\x68\x50\xa4\xe6\x7b\x24\x7f\x2d\x51\x33\xe3\x2c\x87\x2d\x58\x8d\x7f\x70\x81\x95\x81\xbd\x76\xdc\x39\x5e\x06\x93\x21\x66\x30\x86\x99\x51\x65\xe6\x70\x5a\x0f\x6a\x46\x94\x5a\xea\x91\x9f\x13\x37\x83\x49\x90\x33\x18\xc1\xce\x88\x22\x35\xe8\x96\xb5\xb7\x3e\x28\x5d\x51\x13\x58\xab\xe1\x67\x44\x91\x3d\xd0\x9a\x44\xd0\xdc\x7d\xbd\x15\xb8\x06\x93\xc0\x6b\x30\x06\xb0\x51\x07\x69\x3a\x00\xb1\x11\x65\x16\xcd\xa8\x03\xd9\x2c\x45\xf6\x37\x2c\x7d\x75\x88\x5c\x59\x79\xdd\x40\x6d\x30\xc1\x5e\xe2\x00\x6e\x83\xaf\x7b\xcc\x72\xc4\xb9\xc1\x84\xac\x1b\x4c\xc4\xbb\xc1\x34\xcc\x1b\x8c\x70\x6f\xe4\x9a\xf6\xb3\x6f\xd4\x75\x2f\x27\xe6\x34\xfc\x9b\xd5\x72\x52\x50\x73\x0d\x06\x8e\xda\x92\x3a\x72\xae\xe0\xe0\x88\x32\x7b\xe9\x39\x9a\x3f\x23\xe4\x46\x66\xe7\x04\x1d\x0c\x53\x74\xd4\xaa\x46\xeb\x3e\x0b\x19\xb5\x3d\xb5\x36\xc1\x92\xa6\x23\x4a\x2d\x19\x3c\x0d\x51\x47\xfd\xf4\x3e\x0e\x8f\x66\x67\x14\xcf\x00\x8b\xe7\x64\xc4\xb7\x79\x3c\xa2\xd0\x8a\xe2\x9b\x8c\xc9\x83\x51\x2e\x8f\x7c\x7d\x69\x19\x41\x9d\xb2\x79\x30\x29\x9f\x07\xc3\x8c\x1e\x51\x62\x14\x67\xd3\x71\x7a\x30\x15\xab\x07\xa3\xbc\x1e\x51\x68\x4e\xf9\xf5\x33\x7b\x44\xb9\x35\xd2\xaf\x8f\xdb\xb3\x93\x9c\xd3\x7e\x4d\x76\xcf\x4e\x64\x8d\xf8\xd3\xf1\x7b\x76\xc2\x9b\xd4\x9f\x9e\xe1\xb3\x7b\x43\x93\xfc\xd3\x73\x7c\xce\xde\xf0\xea\xc7\x2e\xcf\x58\xb1\x7c\x96\x83\xc6\x9e\x00\x04\xb2\x1f\x00\x58\x93\x80\x60\x4d\x03\x82\x2d\x11\x08\xb6\x54\x20\xd8\x92\x81\xe0\x80\x0e\x04\x7b\x42\xb0\x2b\x82\x66\xa3\xd6\xc8\xa1\x99\xcc\xc1\x1d\x2d\x08\xee\x88\xc1\x5e\x51\x78\xab\x75\x9f\x28\x82\x19\xbd\x4f\x14\xc1\x96\x0e\x8e\x09\x42\x70\x49\x11\x82\x3d\x49\x08\x96\x34\x21\xb8\x26\x0a\x61\x94\x2a\xa4\x6b\x33\x26\x22\x0b\x61\x32\xba\x10\xc6\x08\x43\xea\x89\x30\xe1\xbd\x94\x21\x55\xf3\x9c\xb3\x89\xbd\xa4\xa1\xad\xde\xe0\x44\x4b\x1b\xda\xdf\xf7\x34\xc4\xa1\xcd\x91\xb8\x72\xd4\x6e\x51\x87\x36
\x7a\xf2\x2e\xab\x48\x35\xac\x42\x75\xb7\x73\xc6\x2b\xc2\x94\xcc\x22\x8c\x70\x8b\x2e\x2e\xfc\x5d\x76\x91\xbe\xc6\xa8\x4f\xed\xe7\x17\xa9\x92\x55\x1a\x91\x3e\x86\x91\xaa\xee\x29\x98\xdd\x3e\x8e\xd1\xe2\xae\x5c\x39\x15\xeb\x58\x46\x8b\x85\xab\xc6\xec\x0d\xf0\x8c\xf4\xa1\xd6\xac\x7c\x8d\x69\x74\xb0\xd6\xe8\xb9\x46\x8b\x31\x51\xc6\x3d\x74\x01\x9e\x43\xcd\x85\xa6\xd8\x15\x5a\x7c\xa3\x93\xe9\xe6\x5e\x93\x54\x27\x23\xad\x83\x40\x02\x3d\x10\xa4\x7a\x1c\x11\x92\xe0\x8c\x92\x04\x1b\x52\x12\x5c\xd8\xf0\xdc\x10\x93\xe0\x84\x9a\x84\x09\x8c\x92\xd6\xf4\x24\x7c\x5d\x93\xa4\x13\x90\x12\x26\x83\x29\x61\x04\xa8\xa4\xab\xce\x7b\xa1\x4a\xea\x4c\xc9\x51\x4c\x3d\x58\xe9\xc0\x05\xe3\xa8\x05\x57\x92\x2d\x3d\x6d\x24\xd3\x8a\x4d\x05\xa5\x67\xd6\xb0\x3e\xd2\x8a\x4c\x55\x89\xbb\x47\x33\x61\x1a\x3c\x13\x86\x11\x4d\xa2\x44\x19\xc8\xb2\x0f\xd3\xa4\x9e\xd4\xef\x8b\xa0\xde\x3d\xa8\xa6\xbd\x81\xb7\x83\x6b\xfe\x67\x18\x78\x1d\x87\x49\x85\xf6\xb9\xa9\x74\xa2\xac\xe1\x9b\x16\xbe\x12\x6d\x63\x6c\xc7\x40\x6d\x37\xc0\x7a\x4d\xaa\xd4\x3e\x73\x0c\x7f\xc2\x08\x00\x4a\x5e\x5b\xfe\x5d\x2c\xa6\x4d\x18\x94\xde\xc2\x0d\x37\x37\x3d\x10\xea\xaa\xca\x53\x19\x4b\x9f\xdd\x8c\x64\x81\x94\x82\x35\x56\x0a\xb6\x68\x29\x38\xc2\x4b\xc1\x0d\x62\x0a\x96\x98\x29\x58\xa2\xa6\x60\x8b\x9b\x82\x35\x72\x0a\x96\xd8\x29\xd8\xa3\xa7\xe0\x06\x3f\xed\x8a\xb1\xb1\xa7\xb9\xc0\x50\xc1\x19\x8a\xda\x27\x89\x6a\xb6\x72\x81\xa4\xf6\x48\x22\x9b\xd2\x5c\xa1\xa9\x9a\x7a\xe1\xb3\x64\x03\x31\x53\x36\x38\x8e\xbd\x44\x48\x35\x0c\x53\x65\xcd\x86\x67\xcd\x9c\x0d\x26\xd9\xb3\x09\x32\xab\x7c\xdb\xf6\x19\xb4\xa3\xc3\x4e\x5c\x6d\xbe\x16\xa5\x7a\x59\xbc\x9e\x26\x64\x4a\x5d\x12\xad\x6a\x1d\xf3\x6e\xd9\xc0\x9e\x4f\x1d\x78\xfe\xa3\xf8\xd4\x72\x64\x79\x3a\xd5\xd3\xa9\xed\xc7\xd3\xa9\x9e\x4e\x6d\x3c\x9e\x4e\xf5\x74\xaa\xa7\x53\x3d\x9d\x4a\xa9\xd5\x7f\x30\x9d\xaa\x3b\x64\x79\x36\xd5\xb3\xa9\x9e\x4d\xfd\x4f\x33\x5d\x7a\x36\xd5\xb3\xa9\x9e\x4d\xad\xaa\xed\xd9\x54\x4d\x95\x3d\x9b\xea\xd9\xd4\xa6\x70\xcf\xa6\x1a\xbf\xc6\xb3\xa9\x9e\x4d\xed\xc8\xf0\x6c\xea\xa0\x28\xcf
\xa6\x9a\x8b\xf2\x6c\xaa\xf6\xf1\x6c\xaa\x67\x53\x3d\x9b\xea\xd9\x54\xcf\xa6\x7a\x36\x55\x3d\x9e\x4d\xad\x3d\x9e\x4d\xf5\x6c\xaa\x67\x53\x3d\x9b\x8a\x90\xf9\xdc\x06\x49\x4f\xa6\x7a\x32\xd5\x93\xa9\x9e\x4c\xf5\x64\x2a\xb1\xaa\x9e\x4c\x2d\x1e\x4f\xa6\xfe\x7b\xd8\x4b\x3d\x99\xea\xc9\x54\x4f\xa6\xb6\xcb\x7b\x32\xd5\x93\xa9\xfd\x92\x3c\x99\x6a\x26\xc9\x93\xa9\x9e\x4c\x2d\x9f\x6f\x9d\x4c\xdd\x07\xab\xcf\x6d\x45\xca\xd7\x81\x54\xaf\x34\x35\xa1\xc9\x9b\x52\xbf\x64\x5d\xcb\xe6\xb8\x66\xd1\x67\x71\x55\xbf\x63\xd2\x0d\x9d\x7c\x40\x57\x03\x14\x77\x94\xa3\x53\x78\xeb\x20\xe1\x2b\x1b\x02\xd5\xcd\xc2\x72\x5a\x54\x83\x2e\xe8\xb9\x47\x4a\x59\x65\x92\x0a\xb2\x49\x43\xbc\x57\x2a\xc3\x43\xca\x73\x18\x55\x0e\x25\xca\x12\x15\x43\xca\x43\xbe\xca\xba\x76\xe3\x3d\x4b\x29\x77\xc5\x6c\x9b\xc4\x87\xcd\x56\xd6\x2b\x1f\x9a\xa5\x43\x66\xe1\x5a\x49\x10\x7b\x7a\x76\x3d\x57\x67\x99\x0f\x97\x37\x57\xf3\xb7\x67\xef\xce\xe6\xa7\x47\xb0\x5c\x5c\x1d\xc1\x9b\xc5\x72\xb9\xb8\xc0\x7b\xc4\xd1\x2e\x32\xc7\xfa\xba\x10\xe4\x2c\x17\x57\x84\x52\xea\x6b\x91\x05\xa3\xc3\xae\x1a\x8f\xd4\xa9\x1b\x44\x19\xdf\x10\xb6\xf3\xfb\x38\xd9\xb1\x4c\x96\xff\xf3\x9f\x6c\x67\xfe\x65\xfd\x4b\xec\xa6\x91\x98\x45\x3b\x16\x3d\x59\xba\x9d\x64\x31\xb0\x30\x54\x33\x52\x4c\x1a\xdd\x14\x40\x8a\xcd\x27\xf4\x05\xcf\xb6\x14\xca\xd9\xe5\x4a\x7b\x5d\xaf\xca\xbf\xce\x6a\xdb\xa8\xb6\x83\x15\xf7\x36\xef\x92\x4f\x3b\x29\xf1\x96\x66\x7f\x6d\xfb\x7a\x34\x6d\x33\x4a\x7b\xcb\x23\x9a\xd7\x83\xf4\x02\x8b\xa5\x8b\x46\xc5\x8b\x3d\x6e\x83\xd5\x56\x52\xc8\x04\x89\x4d\x44\x6e\xcf\x14\xbb\x6c\x3f\x5b\x72\x6a\xb7\xae\x47\x75\xb0\x43\x5c\xcc\x97\xef\x17\xa7\xcd\xed\x21\xff\x3b\xaa\xd7\x7e\x51\x7c\xf6\x4b\x25\xea\xec\xb2\xfc\x33\xd1\x63\x3f\x2f\x7d\x3e\x5b\xce\x6f\x96\xcf\xb7\x6b\x75\xdb\x87\x2e\x84\xa4\xed\x39\xae\xb5\xa7\x45\x61\x82\xa2\xeb\xb8\xd6\x63\xf4\xc2\xaa\xc3\x10\xe5\x15\xf4\xce\x92\xa7\x19\x35\x70\x8b\xab\xfb\xd5\x8d\xa6\x26\xdf\xde\xfd\xca\xba\x96\x4d\x1b\xfd\x7e\x1f\x3e\x01\xcb\x7b\xc1\xc2\x5f\x93\xdd\x67\x3c\xa9\x02\x0b\x41\x90\xca\xe5\xf0\x90\x61
\x15\xe8\x16\x91\x7f\x6c\xe3\x9e\xb8\x3c\x11\xd8\xc6\x3b\x99\x26\xd6\xc9\x60\x9c\x93\x32\x62\x09\x59\x8b\xd3\x8e\x71\x52\x7a\x17\x10\x24\x3e\x67\x7c\x13\xf7\xb1\x4d\x86\xe2\x9a\xd8\x85\xa4\xd0\xc7\x34\xe9\x46\x27\x21\x88\xae\xe2\x99\xf4\x45\x26\x21\x08\x3d\xbb\x07\xe6\x3c\x2a\x89\xfb\x88\x24\x83\xd1\x48\x68\xe7\xd8\xfe\x48\x24\x45\x23\x50\x86\xaa\x26\x0a\x89\x36\x9e\x88\xb5\xec\x6e\x2c\x11\x8a\x48\x9a\x1f\xa5\x8b\xc8\x23\x2e\x57\x73\x07\x11\x47\xbe\xda\xe1\xc0\x51\xb4\x91\xa9\x22\x8d\x4c\x11\x65\x64\x82\x08\x23\x43\xd1\x45\x68\x4e\x73\xbd\x91\x45\xca\x18\x21\x04\xa1\xdd\xa8\x22\xad\xf8\x20\xb4\x9d\xb8\x8c\xaf\xa0\x8f\x0d\x42\x5a\x1c\xe5\x50\xec\x8f\x0b\x42\xd6\x31\xb9\x77\x1a\x1b\x70\x18\x03\x46\x72\xc5\xeb\x89\x05\x62\xe3\x30\xa6\x8f\x03\x52\x45\xf4\xa0\x1c\x35\x3a\x31\x40\xfa\xa3\x79\x50\xc6\x6a\x32\x14\xc9\xc3\x2e\x70\xa4\x26\x8a\x47\x2d\x1e\x07\x4d\xcd\x33\x49\x04\x8f\xe1\xe8\x1d\xf4\x25\x70\x22\x4f\xb4\xe9\xbc\xd0\x06\x22\x76\x40\x14\x13\xbb\x6c\x9a\x68\x1d\x93\x44\xea\x18\x8e\xd2\x61\xa1\xf2\xeb\x8d\xd0\x51\x8f\xb5\x41\x97\xdb\x8e\xce\xd1\x8a\xb3\x41\x17\xac\x22\x73\x68\x62\x6c\x58\x8a\xcc\xa3\x72\xe8\xe3\x6b\xd0\x65\x37\x23\x72\xe8\x63\x6b\x38\x91\xfe\xe3\x2b\x9d\xf4\x57\x36\xd2\x35\x91\x38\x1c\xc6\xd4\xa0\xea\x62\x6d\x62\x69\xd8\xc5\xd1\xb0\x8a\xa1\x61\x15\x3f\xc3\x2a\x76\x86\x6d\xdc\x0c\xcb\x98\x19\x2e\xe2\x65\x38\x89\x95\xe1\x28\x4e\x86\xa3\x18\x19\x8e\xe2\x63\x38\x8a\x8d\xe1\x28\x2e\x86\xcb\x98\x18\xce\xe2\x61\x58\xc6\xc2\xb0\x89\x83\xe1\x34\x06\xc6\x70\xfc\x0b\x1a\x51\x33\x51\xec\x8b\x69\xe2\x5e\x0c\xc6\xbc\x20\xc2\xcd\x7d\xf1\x2e\xca\xc8\x15\x44\x4b\xb5\x2e\xd6\x45\x2d\x6a\x05\x41\xaa\x2e\xce\x85\xad\x3d\xb9\x13\xe3\xa2\x15\xad\x82\xa6\x6a\xa8\xc7\xb7\xe8\x8b\x54\x61\x77\x95\x75\x17\xa5\x62\xb2\x08\x15\x43\xd1\x29\x6c\x2f\xb1\xdd\xc8\x14\x55\x8c\x09\xca\x78\xed\x8b\x4a\x41\x77\xb5\xed\x8b\x48\x61\x13\xa1\xa5\x2f\x1a\x45\x33\x34\x03\xed\xb2\xdd\x59\x9e\x5a\x31\x25\x28\xad\xda\x1f\x85\x22\xef\x3d\x0b\x4b\x5d\x23\x02\x85\xe5\x2a\xa0\x8f\x3e\x51\x8b\x23\x41
\xd4\x64\xb5\x16\xe7\x76\x0c\x09\x92\xe6\xa9\x11\x75\xa2\x1d\x3f\x82\x32\xa6\xb4\xd3\xbd\x1d\x3b\x82\xd4\x00\x4e\xa2\xad\x3b\x8a\x19\xe1\x26\x5e\x04\x39\x56\x84\x65\x58\x05\x17\x31\x22\xec\xe3\x43\xb8\x34\x1c\x59\xc7\x85\xf8\x6a\x66\x23\x27\x31\x21\xa6\x89\x07\x31\x14\x0b\x82\x66\x8b\xe9\x8d\x03\x51\x46\x74\xa0\x5a\xea\xdb\x31\x20\x9a\xd1\x1c\x48\x52\xeb\xf1\x1f\x34\x91\x1c\x28\x67\xe3\xa2\xe9\xf4\x51\x1c\xc8\x56\xbd\x09\x22\x38\x4c\x10\xbd\x61\x20\x72\x03\x35\xf0\x79\x5f\xd4\x06\x7a\xcc\xf7\xfe\x88\x0d\x35\x13\x1a\x45\x97\xdd\x8e\xd6\xd0\x31\xa1\x91\xbc\x5f\x5a\x46\xb7\xa6\x09\x8d\xb2\x7d\xb7\x8d\x6e\x6d\xf3\x19\xf1\xe8\xa2\x0f\xa3\x3f\xb3\x88\x86\x51\x45\x67\xd0\xc6\x59\x70\x68\x2c\xec\xc6\x58\x20\xdf\x34\x9c\xc7\x57\x18\xb2\x66\xd5\x6c\x53\x34\x7f\xa2\xfe\xb3\x9c\x55\x02\x9d\x5e\x6b\x56\xd1\x30\xd6\xc7\xf8\x32\x9e\x42\x3b\x32\x02\xc9\x5e\xa4\x8b\xa5\xa0\x89\x8a\x40\xb9\x2b\x97\x86\x2c\x47\x11\x11\xa8\x26\x00\x8b\x48\x08\x76\x51\x10\xac\x22\x20\xb8\x88\x7e\xe0\x20\xf2\x81\x4d\xd4\x03\x9b\x88\x07\x56\xd1\x0e\xec\x22\x1d\xd8\x44\x39\xb0\x8c\x70\xe0\x20\xba\x81\x8b\xc8\x06\x6e\xa2\x1a\xb8\x89\x68\xe0\x26\x9a\x81\x9b\x48\x06\x6e\xa2\x18\xb8\x8b\x60\x60\x15\xbd\x20\x6b\xde\xe9\xce\x59\xb4\x39\xb0\x0d\x37\x5f\x61\x49\x57\xdc\xd6\xd5\x76\xa9\xaf\x84\xb1\xb8\xe6\xdd\x50\x81\xf9\xa5\x1f\xe0\x3d\xcf\x9a\xe4\x97\xe9\x9e\x73\x88\x82\x6c\xf1\xc0\x93\x24\x58\x3f\x73\x7b\x7c\xa8\xbd\x99\xd6\x08\xe2\xd8\x2c\xea\x2f\x0e\x78\xf2\x40\x96\xeb\x50\xe5\x51\x0c\x31\xc2\x64\xe3\x35\x71\xf9\xdc\x07\x39\x52\xca\x25\xb4\x97\xb4\xac\x95\x74\x67\x8f\xf3\x2f\x04\x16\x3d\xa9\xbf\x96\xb6\x05\xb6\x92\xf0\x5b\x84\x53\x4c\x16\x15\x55\x5e\xeb\xe2\xf3\x15\x05\x2b\x0e\x1b\xa5\x9e\x92\x61\x24\x4a\x5c\xe0\x56\xd4\xeb\xd6\x3c\x80\x45\xc2\xef\x3f\x3d\xfc\xf0\x32\xe1\x69\xf6\xf2\xe1\x87\x97\x45\xb8\x88\x13\x75\x74\x3f\xcd\xbb\x28\xc6\xc5\xb7\xc8\x55\x82\x11\xdc\x5e\xb4\xc4\xdc\x0e\x8f\xe5\x6c\x9b\xf0\x74\x1b\x1b\x98\x30\xcd\x0d\x96\xed\xa9\x5b\xbe\x62\xa4\x5c\x6b\x78\xe6\xa5\x20\x0c\x22\x71\x67\x48\xd8\x63
\x04\xdb\x38\x09\xfe\x57\x8c\x53\x71\xc3\x57\x06\x86\xd1\x56\x12\xbd\xb4\xda\xb2\x24\x1b\x9b\xd5\x24\xad\x28\x45\x07\x6a\xa8\xf1\x44\xa1\x66\x64\xd5\x9d\x71\xf7\x60\x69\xa9\x55\x1c\x9a\x66\xd8\x47\xaf\x88\x6d\x8f\x79\xf1\x2a\x5c\x41\x7a\x33\xc9\x97\x19\x2b\x28\xbb\x9e\xec\x69\xc6\x32\xae\x5a\x47\x6a\x83\xa4\x4e\xbf\x9c\x87\xa6\x5b\x8f\xac\x45\x61\x51\x90\xb0\xbb\xba\x43\x33\xc8\x2b\x8c\x4e\xba\x55\x24\xd9\x7a\xbb\x38\x5f\x5c\x37\xa1\xdd\x9f\xae\xe7\xbf\x1e\xc1\x9b\xf3\x0f\x73\xf9\xe7\xb9\xb1\x0b\xdf\xaf\xf3\xf3\xf3\xc5\xc7\x23\x58\x5c\x8b\x03\x87\xf4\xf3\x32\x73\xe9\x32\xbf\xc3\x1d\x77\x2b\x6c\x58\x4e\x7c\x95\xe1\x4f\xc5\xa7\x9b\x4b\x9d\x9b\x9d\xd0\x8e\xf3\xe6\x31\xfc\xb1\x6a\x43\xc3\x1f\x5f\x1b\xb5\x02\x32\x74\x8c\xed\x34\xc5\x85\x88\x71\x30\x55\xf1\x01\x5e\xba\xd3\xb5\x6c\xa3\x7c\xb2\x72\x58\x1d\x12\x69\xcf\x46\xcf\xd9\xb2\x3a\x93\xcc\xdb\x9e\x78\x2c\xb3\x37\x8b\xbf\xcd\x8f\xe0\xcd\xfc\x7c\xf1\xd1\xfd\xdc\xa3\x07\x5e\x39\x56\x35\x33\x9d\x80\x73\xb3\x89\x12\xb2\x3b\x1e\x3e\xcf\x68\x3e\x17\xaf\xc2\x0f\xaa\x99\xaa\x63\x39\x9a\x50\xa3\x48\xf6\x35\xe6\xf3\xa2\xc3\xee\xce\x50\xa7\x5a\xc4\x84\x59\xc7\x07\xd3\xc1\xd7\x6a\x91\xbf\x89\xda\xe1\x5b\x64\x59\xa7\x9d\x9a\x4d\x52\xfd\x9b\xe1\x64\x48\xb7\xf1\x21\x5c\x4b\x0e\x56\x59\xd9\x0b\xa0\x28\x62\x59\xf0\xc0\x21\x5d\xb1\x90\xe3\x0c\x23\xea\x60\x3e\x72\x8c\x0e\x76\x3c\xdd\x06\xf7\xd9\xe9\x21\x31\x0a\x2a\x80\x18\x7c\x9a\x9b\x70\xe3\x55\xb8\x53\x35\x87\x75\x5e\xae\x86\x4f\xa7\xfb\x90\x3d\x01\xcb\x63\x8a\x04\xa9\xc1\x0a\xad\x4e\xd4\x30\xab\x95\x51\x7f\x07\x69\xb0\x3b\x84\x19\x8b\x78\x7c\x48\xc3\x27\xd1\x23\x8f\xa9\x29\x0f\x76\x9f\xc4\x3b\xc8\x1e\x63\x21\x24\x08\x59\x72\x1c\xf2\x68\x93\xe5\x77\x73\xa5\x75\x4e\xe1\xf7\xfc\x64\x73\x72\x04\x8f\x9c\x7f\x3e\x16\x37\xc4\x63\xf1\xa7\x51\xd1\xaa\x1b\xd3\x3f\x9c\x34\x1b\xa1\x50\x61\xef\xe3\x34\x10\x23\x44\x81\x20\x81\x0c\x7c\x3d\x2a\x33\x8e\xc2\xb6\x41\x4b\xb6\x41\xce\x2f\xc9\x6b\xb5\x74\xfb\x8b\xef\xe1\xfc\xec\x72\x0e\xfb\xd0\xc0\x3f\x47\x8c\x8d\xe1\xe1\xf6\x65\xf6\x25\x30\xbc\xb0\x19\x5d
\x1f\x5a\x43\xec\x17\x21\xde\xb4\x08\x76\x77\x36\x11\xde\x19\xb3\xd5\xcd\xa3\xcd\xa0\xff\x02\xec\x4b\x30\xaa\xb0\xc1\xdc\x5c\x8c\xb7\x10\xe4\x06\x42\xdc\x3e\x3a\x6d\xa1\xb6\x8f\x7c\xa9\x34\xf9\x78\x50\xab\xde\xf4\x9f\x74\x23\x5e\x83\x29\x46\x1a\x3b\xf2\x2d\x86\x07\xba\xee\x71\x4e\x34\x98\x6a\x8f\x13\x78\xf3\x24\x76\x09\x76\x08\xb3\x23\x60\x52\xcf\xc0\xcc\xb6\x4a\xb5\x8b\xe4\x11\x28\xba\xdc\x14\xdc\xbc\x9d\x9d\xcf\x1b\xc7\x30\x23\xb1\x62\x89\x98\x5d\x1f\xc1\xf9\xe2\xa7\xef\x5f\x99\x9c\xd5\x4c\x4f\x6a\xc7\xdd\x1a\x19\x95\x52\x15\x32\xfb\xa9\xa8\xf3\xe0\x2f\x9f\xa6\x5d\xb4\x7e\x9d\x72\xd1\x32\x11\x8e\x58\xb4\x7e\xf5\x8b\xd6\x7f\xce\xa2\xf5\xab\x5f\xb4\x1a\xcf\x37\xbb\x68\xe5\xe7\xe0\x4b\x31\x2c\x6a\xc5\x7b\x86\x57\x47\xb7\x51\x16\xae\xfd\x68\x20\x6c\xf6\xee\x90\xb1\xbb\x90\x1f\xc1\xf6\xb0\x63\xd1\x71\xc2\xd9\x5a\xfc\x3f\x08\x71\xd5\xe4\xe0\x19\xdb\xe0\x2b\x33\xcf\x58\xfd\x5f\x85\xec\x45\x14\x76\x15\xda\x8d\xea\xfd\xf7\xad\x78\xd9\x7f\xdf\x96\x91\x95\xc4\x0d\x35\xde\x67\xc1\x2e\x48\xb3\x60\x05\xab\x38\x52\x9a\x8f\x95\xf2\xb7\x4f\xe2\x10\x58\x0a\x0c\x1e\x5b\x16\x81\x2c\x86\x2d\x0f\xf7\xb0\x4f\xf8\x03\x8f\x9a\x17\x02\x38\xec\xd7\x2c\xe3\xf2\x44\xcc\x60\x1f\x87\xc1\xea\x49\x9d\xfa\xc5\x49\xfe\x31\x09\xa4\xb7\x5d\x07\x12\x91\x3e\x22\x92\xa9\xae\xd7\xb2\x70\x11\x29\xee\x78\x09\x4f\xf7\x71\x94\x4a\x6c\xe1\xbf\x6f\x7f\xe2\x59\x39\x49\xff\xfb\xf6\xa8\x13\x3c\xe1\x90\xf2\x44\xb9\x90\xf3\x2f\x7b\xbe\xca\xd4\xf2\x2c\x1d\xc0\xb7\x2c\x93\x2d\x5f\x09\xfe\xfb\x81\xa7\x99\x92\xfb\x41\x7e\x41\x4d\x74\xfb\xe3\x79\x94\x1e\x64\xf0\x25\xe5\x8a\x17\x24\x85\x8b\x56\x11\xa2\xa9\xb5\x1b\x28\x42\x86\x27\xa9\x84\x79\xba\x57\xd3\xf2\x55\xa2\xd9\xef\x83\xcd\xa1\xc8\x51\x2b\x6d\x58\xd2\xf2\x93\x5f\x7b\xa3\x58\xdd\x65\x58\x2a\xfa\x6f\x7d\x90\x3e\xab\xeb\xa2\x78\x43\xe8\x2a\xe1\x1d\x66\xe8\xcb\xf1\xe7\xc3\x1d\x4f\x22\x9e\xf1\xf4\x38\xd8\xe5\x23\xb4\x35\x6a\x36\x49\xb0\x56\xab\x5d\x77\x5c\x76\xf6\xed\x0e\xa9\x56\x94\xd5\xfd\xa6\xb5\xb0\x6a\x7f\xdc\x18\xb1\x6f\xe3\x48\x46\xd0\x0a\x52\x69\x97\x8a\x36\x7c
\x5d\x04\x8e\xb8\x63\x69\xb0\x82\x50\x16\x57\x3d\x91\xf0\xe3\xfb\x50\x5c\x45\x99\x18\x90\xfb\xd6\x9a\x1f\x06\xa9\xb4\x84\x06\x91\x52\x83\x04\x71\xc4\x42\xe0\x21\x57\x8e\xaf\x61\xf0\x59\x74\x9f\x5c\xd6\x65\x1c\xa4\x20\x6c\x5a\x89\xd5\x47\x88\xfe\x09\x83\x55\xd6\xd8\xb0\x8f\x61\x17\xa7\x2c\x58\x75\xbe\xe5\x18\x92\xf8\x51\xf3\xb7\xab\xda\x8e\x52\xfb\x87\xbe\xe3\x80\xfa\x79\xe7\x8c\x30\x14\x20\x78\x28\xfc\x6f\xd7\xb4\x22\xa4\xb7\x7e\xd4\xd9\xdf\x95\x9e\x49\xb4\x60\x5e\x1d\xe5\xd0\xac\x82\x9d\xca\x70\x36\x01\x7f\x7c\x21\xee\xc5\xeb\x6c\x2b\x3a\x4c\xc5\x6c\xd3\x28\xbd\xce\xee\x21\xde\x05\x59\xa6\x82\x62\xa5\x5c\xce\xba\xff\xe5\x49\x2c\xb6\xbb\xf4\x29\xcd\xf8\xae\xd8\x00\xcb\x09\x25\x97\xab\xc7\x6d\x10\x8a\xa9\x1a\xad\xb9\x0c\xd4\xd6\x90\x9c\xf7\x9d\xbe\x91\x74\x46\xcd\x56\x33\xe4\x5b\xfa\x58\x33\xf4\x0c\x1f\x65\xb5\x4e\x78\x35\x4c\x95\xbb\xf7\x96\x17\xcd\xd5\x69\x86\x24\x7e\x3c\x96\xe1\x43\xda\x2d\x64\x64\xa5\x34\xb1\x4a\xf6\x58\x21\x07\x4f\xe0\xe3\x73\x55\xdf\x50\x63\x87\xd9\xbb\x90\x45\x9f\xf5\x87\x82\xd1\x1b\x41\xab\xa7\xde\x08\x51\xc3\x3f\x1c\xab\x7c\xbf\x88\x96\x26\x57\x56\x1b\xd2\x3d\x5b\xf5\x28\x6c\x06\x96\x85\xea\x39\x86\x2f\xea\xa4\xd8\xf3\xaf\xe9\x2a\x4e\xf8\xaa\xbd\x80\x57\xff\x9e\xf1\x2f\x7d\x45\xc3\x78\x93\x5e\xb1\x48\x7b\x66\x2f\xff\xcd\x4d\xbb\x9f\x0f\xbc\x0a\xd1\xf6\x26\x62\xbe\x5e\x7b\xde\xf5\x8e\x8c\xf1\xcb\xda\x58\xf6\x17\xa3\x6b\x0c\x2e\x9b\x4b\x6b\xb8\xaa\x1a\xa8\xd5\x68\xb5\x8d\xe3\x94\xa7\xf9\x0a\x1d\xc6\x1b\xe0\x51\xa6\x52\x0d\xc4\xf9\xc1\x6a\xe8\x8a\x76\xc3\x39\xfc\x36\x5b\x3f\xb0\x68\xc5\xd7\xb2\xef\xe1\xff\x1e\x54\x48\xcc\x7e\x4f\x98\x30\xde\x6c\x82\x68\xf3\x72\x1d\xaf\xd2\x97\x62\x43\x78\xc9\x72\x09\xc7\x7f\x57\x85\xff\x30\xf4\x4e\x71\x84\x6d\xd6\x54\x7c\xc8\x8e\x65\xab\x7a\x60\x77\xb9\xce\x56\x31\x6d\x66\x43\x5a\x5c\xb9\x80\x16\xe5\xa4\x20\x19\x2e\x31\xac\xbf\xa5\xaf\x46\x05\xd1\x28\x4e\xfc\x03\x17\x74\x13\xb7\x99\x76\x7c\xf7\xba\x64\xb3\xbe\x95\x5b\xb0\xf8\xb9\xd8\x81\xf3\x66\xae\x12\xd3\xa8\x58\x77\xa1\x4c\x2a\x21\x66\xfd\x40\x8b\xdc\xc7\xc9
\x09\xbc\x55\x06\xce\xf0\x49\x29\xb8\xcb\x8c\x36\xa2\x65\xd3\xc3\x7e\x1f\x27\x59\x1e\x68\xb2\x74\xf0\x1a\xba\x41\xab\xbd\x57\x6d\xd6\xc5\xce\x9d\xef\x7e\xdb\x38\xcd\x0a\xf9\xfd\x5d\x8f\x74\xcc\xc1\xb9\xe4\x8c\x3a\xe3\x18\xaa\x17\xb0\x38\xa1\xfa\x7d\xc2\x25\x1c\xbd\x1a\xd6\xf1\x1c\x97\x7d\xf9\x1a\xde\x8a\x59\x55\xfc\xef\x8e\x45\x6c\xc3\x93\x97\x57\xaa\x09\x07\x2f\xbd\xf2\xa6\xf0\x5a\x0e\x13\xcd\xef\xca\x45\xd1\xcd\x6e\x70\x33\xb8\xc6\x1a\xef\x06\xc3\x62\x5a\xcb\x5b\xf9\x09\x90\x1e\x76\x3b\x96\x04\xff\x2b\x91\xbd\x56\x64\xb6\x09\x37\x6a\xd2\xc6\x31\xbc\x49\x43\x19\x22\xb6\xaf\x4a\x2d\xf7\x5c\xe2\xe6\xb4\x61\x87\x0d\xff\x5b\xc0\x1f\xc7\x56\xb2\x11\xd5\x6c\xfb\xc2\x57\x88\x1d\x2f\x60\x3c\x14\xc6\x65\x36\xc6\xc5\x47\xb1\xec\x28\x70\x51\x5e\xb6\xcb\x41\x92\xc5\xd2\x36\x09\x4c\x7d\xfc\x98\x8f\xa0\x99\x36\x36\x8c\x1f\x79\xf2\x26\x3e\x44\x23\x11\xc1\x8d\xad\xf3\x08\xab\x7c\xe7\x40\x56\x54\x65\xb0\x54\x57\x55\x2b\xca\xc1\x9d\x28\x58\xf9\xa7\xd5\x9b\xc8\x20\xe0\x73\xc3\x9c\x9f\x9b\x85\x95\x8a\x82\x85\x8f\xec\x29\x15\x97\xb6\x4d\xc2\x59\x7e\x1c\x89\xc6\x59\xb5\x92\x64\x12\xd5\x19\x3a\x25\x1c\xf6\xfb\x6f\xa5\x07\x3e\x94\x55\xc1\xf5\x80\xfc\x84\x67\xe8\x81\x90\xcb\xd4\x38\xb2\xf9\x8d\x52\xef\x8e\x34\x7f\xba\x67\xc9\x67\xb9\x40\xba\x5f\x49\x6e\x1a\xb2\x1d\x2e\x27\x86\x82\xb1\x6b\x8a\x6c\x8b\xb1\x35\x65\x78\x6d\x07\x75\x5b\x29\xeb\xb7\x7c\xda\xf7\xf7\xba\xd9\xf2\xb4\x0b\xa2\x19\x26\x71\x81\xb1\x69\xa5\xd5\x5b\x17\x9d\xf7\xd0\xd7\xa0\x38\xaa\x47\x3f\xbe\x97\xba\xd8\x68\xf5\x34\x4e\x29\x54\x23\x5f\xea\xfb\x44\x5d\xf8\x1a\xee\x9e\x72\x5c\xb1\x8c\xa5\xb4\x0b\xa2\x60\x77\xd8\x19\xa7\xa5\x57\x7e\x28\x45\xb6\x37\xe9\xbb\x57\x3f\x64\xa8\xa4\x8f\xf0\x4e\xcc\xa9\x2f\x4c\xbc\xf9\x08\x82\x31\x64\xb8\x0c\x2c\x10\xa4\xb0\x3f\xdc\x85\x41\xba\xe5\xe2\xd3\x57\x1c\xf8\x03\x4f\x9e\xe0\xfb\x57\xa2\x9e\x87\x8c\xa7\x10\x64\xf0\x28\x66\xf2\x88\xc8\x28\x16\x97\xa3\xcf\xa2\x5e\xb9\x4e\x5c\x71\x2e\x2a\xc4\x4b\xb0\xc9\x9b\x95\x65\x32\x5a\x91\x12\x3e\x22\xb2\x48\xe0\x20\x43\x1d\xd7\x23\x91\xc4\xfb\x5c\xe5\x25
\x84\xf3\x2f\x41\x9a\xa5\xea\xee\x30\xca\x53\x30\xd8\x06\xd1\xa0\x6b\x7e\x73\x02\x4c\x33\x5e\x6f\xcc\x26\x19\x38\x58\x61\x9a\xaf\x1a\x35\x09\x36\x8d\x81\x4d\x13\x92\xa8\x81\x58\xe2\x65\x0b\xa9\x91\x9e\x2f\x40\xa3\xfd\xa8\xf6\x94\x55\x51\x29\x9d\x39\xf0\x6a\x76\xfd\xf3\xa7\xb7\xef\x67\xd7\xcb\x4f\xcb\x5f\xaf\x50\x96\x41\x55\xf6\xfc\xec\x72\x7e\x94\xff\xf9\xcd\xec\x7a\xd8\x36\x38\x6e\x15\x3c\x1e\xac\x92\x51\x51\x51\x23\xa3\x1f\xbe\xe9\x35\x20\x9a\x00\x33\x84\x3b\xbf\x01\x85\xd1\x35\x0b\x57\x75\xa9\x7c\xf9\xca\x60\x76\x59\x89\x1a\x0c\xc6\x2b\x68\xee\x5f\x9b\xe0\x81\x47\x6d\xde\xff\xc5\x8b\xd2\xe5\x59\x0e\x0e\xb9\xb6\x0d\x88\x64\x11\xb0\x55\x26\x8e\x6b\xea\x10\xf2\xe5\xa8\xf5\x96\x40\x45\xff\x80\x35\x8b\x36\x3c\xc9\x6b\x39\xb8\x44\x7e\x11\x65\xba\xc7\x15\x19\x41\xa2\x10\x53\xb6\x46\x6e\x79\x4b\x82\xcd\x86\x27\x43\xeb\xcf\x1d\x0f\xe3\x47\x99\xd7\xa2\x75\x16\x1d\x95\x3e\xd4\xa0\xf9\x7b\x81\xdd\xc5\x0f\xfc\x04\x6e\x94\x27\x63\xf8\x24\x76\x01\xf5\x21\xf2\x5f\x5e\xca\xb7\x4b\xf3\x69\x12\x0d\x2f\x57\x3d\x5f\x56\x97\x53\x66\xd0\xe0\x23\xea\x4d\xf5\x14\x3d\x90\xbf\x3b\xef\x82\x63\x38\x44\xb2\x91\xbf\xa8\x80\xd3\xfb\x43\x26\x77\x9b\x5a\x67\x0d\xad\xd3\x42\xc6\x09\xfc\xfe\x34\x6f\xb0\xe4\xb0\xdb\xa7\xc5\x1b\x4e\xfe\x00\x30\x4b\x25\xe0\x5f\xec\x89\x32\x05\xce\x70\xc0\x07\x56\x1b\x34\xd2\xb0\x27\xd5\x7c\x71\x18\xc6\x8f\x32\x08\x60\x7c\x48\xea\x93\xe0\x1f\x26\x1e\xd2\x7f\x7d\x75\x24\x03\x67\x64\x7c\x13\x27\x4f\xaf\xe1\xc5\x8b\xd3\xd9\xe5\x4f\xf3\xeb\x17\x2f\x8e\xaa\xde\x13\x7f\x2d\xdd\xd3\xc5\xdf\xfe\xf3\xe8\xb5\x99\xe8\xbf\x74\x44\x7f\x9c\x5d\x5f\x9e\x5d\xfe\x34\x24\xdb\x4c\xf4\xf7\xa6\xb5\x56\x7e\xfe\x08\xc9\x7f\x34\xae\x74\x25\x5a\x06\x5d\x19\xf4\x71\x56\xbb\x48\xcf\x94\xfd\xfe\x95\x3a\xc0\xe4\xc6\x76\xc3\x21\x2b\x06\xa1\xfa\x68\x35\xd4\x8e\x8a\x97\x34\x26\xef\xf7\xaf\xe0\xee\x90\x55\x2f\x1e\x90\x58\xaf\xd2\x1f\x5f\x01\x83\xfc\xc3\x5b\xe2\xc5\x69\x62\x95\x49\x47\xdf\xec\x91\x0f\xc6\xad\x13\x52\xa2\x35\xfc\x45\xfc\x07\x16\x3f\x0f\xd5\xb3\xfe\xf2\xbf\x0c\x39\xce\x35\xbe\x06\xfe\xaa
\xa9\xa7\x78\xa5\xe6\x15\x86\x5f\x2e\x25\xd6\xdb\xf5\xa4\x7f\xa3\xfe\x16\x14\xb1\xa3\xbe\x81\xf8\x03\x9a\x11\x02\x69\xea\x8d\x67\x84\x3d\xd2\xdd\xff\x4d\x50\x47\x9b\x16\x30\xa5\x1b\xd1\x5c\xa3\x21\xa4\x69\x44\x46\x0d\xb0\x8a\xa3\x2f\xe8\xb2\x8c\x14\x4a\xd1\xc4\x89\x0d\x4f\x26\x1a\x31\x89\x46\x34\xa2\x19\x87\x68\x48\x20\x1a\xb2\x87\xe3\xd4\xa1\x31\x6f\x48\x9f\x1f\xa6\x8c\xa1\xcd\x1c\xc1\x60\x85\x13\x01\x85\x48\x94\xd0\x38\xa3\x87\x03\x88\xd0\x6c\x6a\x50\xc0\x41\x33\x64\xd0\x04\x16\x34\x72\x97\xa6\x0f\x42\x13\x37\x69\x17\x50\xa0\x11\x0e\x88\x00\x01\x51\x08\x20\x01\xfe\xc3\x63\x7f\xb9\xd2\x7a\x74\xdc\x1a\x01\x7f\x23\x28\x5f\xcb\x92\xe6\x56\x7f\xbd\x34\x30\xd3\x01\x6d\x5d\x32\x94\xdc\xe3\x8b\x9c\x47\xbb\x16\x23\x4f\x6a\x2e\x75\x36\xd3\x81\xe6\x57\xc4\xde\x96\xc3\x4d\xc6\x56\x9f\xd7\x49\xf0\xc0\x93\x82\xb5\x83\xd9\xd5\x99\xad\x41\x2d\x6b\x85\x90\x35\xd1\xff\x19\xe0\x23\xbd\xbd\x33\xe2\x51\x03\x2e\xba\x08\xf5\xb6\x46\xb7\xa9\xdf\xc3\x9e\x25\x6c\xc7\x33\x9e\xa4\x84\xf0\x45\xe3\x46\x07\x90\xab\xe8\xfd\x78\xdd\xcc\x09\x15\x86\xc9\xb7\x4d\x0b\x7c\x92\xa7\x42\x46\x24\x95\x76\xde\x91\x98\x97\x37\xfa\xb5\x0e\x75\xa8\xb4\xd6\x8f\x98\x49\x58\x3c\x8d\x6c\x5e\x1f\xa4\x75\xaa\xd4\xd2\xd7\xf2\x5d\xee\x0e\x61\x16\xb4\x3d\xa0\xf5\x4f\xbd\x12\x62\x89\xa8\xa2\xf9\x3f\x04\xfc\x31\x2d\x16\xd6\x7e\xc7\x8a\xfa\x83\x8b\x1f\x43\xca\xb0\x6d\x0b\xf5\x53\x32\x69\x1b\x64\xcf\xae\x72\x61\x1b\x49\x84\xc1\x8c\xd9\xed\xfc\xd7\x86\x22\x5b\x59\xb2\x27\xcf\x79\x3d\x92\xe7\xba\x91\xb5\xda\xf4\x13\x7a\x73\x5b\xf7\x64\xaa\x36\x94\xdb\xc9\x67\xdd\x9f\x9d\xda\x50\x62\x91\xc3\xda\x5d\x46\xea\x91\x2c\xd4\xdd\x9c\xd2\x86\x72\x9b\x99\xa7\xdd\xe5\x91\x36\xce\x1d\x6d\x9e\xeb\x79\x30\x5f\x34\x26\x1b\x02\x35\xe3\xb3\x75\x9c\x2a\x5a\x66\xe7\x29\xf7\x29\x8b\x84\xcd\x46\x49\x9a\x6b\x29\x97\xcd\x27\xa3\x26\x31\xf3\x40\x9a\x65\x43\xb9\x32\x19\xf3\x58\x6a\xe5\x66\xa2\x64\xe3\xc9\xde\x4d\xa7\x3c\x90\x1c\xd9\x50\x6a\xae\xd2\x1d\x4f\x88\x5c\xa4\x37\x36\x9d\xf0\x7d\x49\x90\x2d\xe2\xb1\x8f\xc4\x60\x27\x65\xef\x65\xd1\x7a\x34\x75
\x31\xa5\xae\x9a\x74\xc5\x23\xc9\x87\x4d\x57\xe8\x56\xf4\xf5\xa1\x84\xc3\xa6\x73\xa1\x13\x75\xbd\x27\xc9\xb0\xf9\x1e\x3a\x9e\x58\x18\x1b\x8a\xdd\x30\xfc\xba\x75\x6a\x60\xc3\x74\xc0\xb2\x55\x0c\x45\xf6\xa7\x00\xee\x24\xf4\x35\x94\xa8\x4d\xfb\xdb\x97\xc4\xd7\x74\xc6\x8e\x44\x48\xa7\x26\xee\x35\x4a\xd6\x5b\x4f\xbd\x8b\x93\xab\x49\xd0\xeb\x2a\xdd\xae\x71\x8a\x5d\x7d\xc2\x5c\xdc\x5b\x9a\x69\x75\xf5\x49\x72\xc9\x12\x5f\xfd\xd8\xad\x79\x95\x18\x17\x27\x96\x98\x3e\x17\x13\xef\x0d\x9b\x26\x17\x9f\x1a\x17\x9d\x0e\x17\x9d\x02\x17\x9d\xf6\x96\x92\xea\x96\x90\xde\x96\x9a\xd2\x96\x9c\xc6\xd6\x22\x75\xad\x45\xba\x5a\x8b\x14\xb5\x16\x69\x69\x2d\x52\xd1\xda\xa6\x9f\xb5\x4a\x39\x4b\x48\x33\x8b\x4d\x2d\x6b\x9d\x4e\x76\x34\x85\x6c\x33\x21\xac\xe9\x71\x6a\x3c\xf1\x8a\x5d\x12\xd8\xd1\xc4\xaf\xf5\x34\xae\x18\xa5\x47\x3b\xd9\x6b\x6f\xea\x56\xec\x89\xfd\xc4\x55\xba\xd6\xd1\x14\xad\xed\x84\xab\x86\x72\x7b\xd2\xb2\xf6\x25\x59\x35\xfd\xfe\x32\x15\xab\xdb\xc4\xaa\xa3\xc9\x54\x6b\xa9\x51\xcd\x45\xea\x13\xa8\xf6\xa7\x43\x35\xed\x32\x99\x34\x75\x3c\x05\x6a\x6f\x42\x53\xc3\xf7\x34\xd2\x9e\x3a\x4c\x62\x3a\x9e\xb8\x14\x9d\x6d\xb5\x27\x59\x69\x4f\xea\x51\x44\x3b\xd7\xbc\x29\xdd\xa4\x1b\x1d\x49\x31\x6a\x3d\x8c\xeb\x69\x45\x6d\x92\x84\x5a\x24\x06\xa5\x27\x03\x45\x25\x00\x25\x24\xcf\xa0\x26\xfa\xa4\x25\xf7\xb4\xd5\x11\x92\x92\x78\x4e\xa9\x21\x24\xe7\xe6\x34\xc8\xc7\xd9\xc8\xae\x69\xbc\x77\xb5\x73\x70\xf6\x67\xd4\x34\x15\x99\xe7\xdd\x74\x98\x45\x73\x34\x73\x26\x3a\x85\xa8\x36\x5b\xe6\x60\xee\x4b\x63\xfd\x4d\x9e\x21\x73\x34\xdf\xa5\x68\x0d\x43\xa1\x43\x39\x2e\x3b\x19\x2b\x11\x3d\x5f\xe5\xb5\x74\x9a\xa5\x72\x3c\x33\x65\xa5\xe3\x34\x14\x69\xa0\x09\xed\xe4\x96\x34\x3f\x10\x19\xe4\x93\xac\x65\x87\x44\x34\x70\x47\x8b\xd9\xa3\x83\x75\xa8\xc1\xac\xb2\x40\x1a\x9f\xb2\x95\x82\xd2\x71\xe6\x47\xc3\x6c\x8f\x98\xec\xe5\x43\x19\x1e\xbb\xf9\x1a\x4d\xe7\x96\x2e\xab\x63\x4f\x8e\x46\xd3\xf1\xdf\xc9\xe4\xe8\x26\x2f\x23\x46\xb7\x84\xcc\xbf\x88\xcf\xb9\x88\xce\xb3\x48\xcd\xad\x48\xcc\xa7\x88\xcd\xa1\x88\xcd\x9b\x88\xce\x95\x88
\xcf\x8f\x88\xcd\x89\x48\xc8\x83\x48\xcc\x7d\x48\xcd\x77\x48\xcf\x71\x48\xcf\x6b\x48\xcf\x65\x48\xcf\x5f\x48\xcf\x59\x68\x97\xa7\x90\x94\x9b\x70\x2c\x96\x56\xf1\xa0\x0e\xe3\xb8\xd8\x5a\xc5\x33\x10\xe2\xf5\xb7\xca\x85\x26\xaf\xb1\x79\x9a\xb8\x87\x1f\x5e\xaa\x22\x46\x79\xe0\x94\x47\xd0\x9a\x47\x99\x74\x32\xaa\x39\xbb\x16\x7a\x83\x32\x18\x54\x37\x10\xaa\xfe\x29\xa3\x3e\x65\x71\x8e\xb4\x8f\x14\xda\x07\xab\xcf\xed\x6b\xc4\xb4\xbe\x7e\x57\x9a\x37\x9a\xcb\x70\x75\x55\x22\xd5\xa2\x39\x6a\x58\xf4\x59\x8c\x91\x3b\x96\x36\x13\x29\xe6\xa3\xc6\xb5\x67\xdd\xbf\x64\xda\x27\xa7\xfd\x63\x9b\x18\xea\xbd\xba\xb6\x16\x01\x40\x92\xbc\x03\xb3\x18\x52\x1e\x9a\x8c\x64\xf5\xd4\xbb\x5a\x4e\xe1\x3d\x93\xa0\x5f\x12\x1f\x36\xf5\x98\x76\xae\x73\x42\x2d\x17\x57\x47\xf0\x66\xb1\x5c\x2e\x2e\xbe\xad\x94\x50\xcb\xc5\x95\xe1\x2f\x55\xed\x0d\x7e\x1c\x1d\x76\x55\xef\x63\x06\xbc\x3e\x74\xae\xfe\x19\x0a\xa8\xab\x7f\x5a\x73\xe4\xb2\x5e\x4b\xfc\x80\x14\xe3\x71\xc7\xa2\xa7\xe6\x88\x8a\x15\x15\x64\xee\x22\xda\x37\xfc\x0c\x8a\xe7\x53\xe0\x82\x67\xdb\xe7\xf2\xd5\xbd\xae\xbf\xf2\xdb\x59\x5b\x1a\xd5\x22\xae\x2f\xb7\x79\x73\x7e\xda\x49\x29\xb7\x1a\x53\x05\x61\x89\x09\x22\xa5\x8b\x50\xb1\x1d\xa5\x59\x2e\x96\xb6\x89\x0c\x99\x65\x4b\x05\x0d\x6d\x44\x5f\x56\x5e\x7b\x7b\x96\xbb\xd8\x11\x4c\x52\x71\xe1\x13\x5b\xd7\x18\x74\x88\xcf\x8b\xf9\xf2\xfd\xe2\x14\x9f\xa6\x00\x8a\x92\xca\xc1\xa5\xf8\x9f\xd9\x2f\xd5\x9f\xcf\xaa\xbf\xbf\xf9\x70\x81\x14\x2b\xee\x86\x37\x4b\xf7\xab\x69\xf7\x7b\x71\x05\x11\x77\xbd\xaa\x4d\x90\x05\x8c\xaf\xab\x55\xeb\xe2\x0a\xa8\xc6\x1d\x29\xa3\x5c\xae\x59\xf2\x34\x7b\x2e\xd6\xe4\x46\xf3\xc6\xe7\x3f\x7f\x92\x6a\xd1\xd4\xcd\xef\xf7\xe1\x13\xb0\xbc\x05\xeb\x06\x6a\x60\xf7\x19\x4f\xe0\xb6\xf6\x57\x26\x1a\xa5\x6a\xb5\xf2\x50\x88\x87\x42\xea\x8f\x87\x42\x3c\x14\xe2\xa1\x90\x6f\x12\x0a\xd1\xed\x23\x9e\x0e\xf1\x74\x88\xa7\x43\x3c\x1d\xe2\xe9\x10\x4f\x87\x78\x3a\x44\xfb\x16\x4f\x87\x14\x8f\xa7\x43\x3c\x1d\xd2\x2a\xea\xe9\x10\xf3\xf2\x9e\x0e\xf1\x74\x88\xa7\x43\x3c\x1d\xe2\xe9\x10\x4f\x87\x78\x3a\xa4\xaa\xbf\xa7
\x43\xbe\x39\x3a\x44\xa7\x2a\xf4\x98\x88\xc7\x44\x3c\x26\xe2\x31\x11\x8f\x89\x78\x4c\xc4\x63\x22\xb8\x6f\xf1\x98\x88\xe9\xef\x3d\x26\x32\x52\xd2\x63\x22\xf9\xd3\x0e\x1c\x7d\x2d\x36\xfa\xe7\x89\x1e\x2d\x5f\x65\x26\xc0\xd5\x89\x7c\xfc\x95\x8d\x63\xf6\x95\x26\x80\x34\x03\x79\x16\x2a\xd2\xdc\x40\xf6\x18\xcb\x46\x1c\xe9\x9e\x06\xa9\x30\x62\x66\x35\xf7\xf5\x5a\xf3\x28\xde\x05\x11\xcb\xc6\x73\xa9\xd8\xf9\xf0\x9d\x56\x2f\x32\x2f\xea\xb4\xd7\x30\x35\xe8\xe8\x52\x6b\xed\x54\x1c\xf7\x64\x2f\x8e\x6f\xbb\x66\x61\xc0\xc1\x34\x14\x38\xe0\x5d\xf9\x30\x6e\x9a\x80\xed\x66\xb0\x0b\x0d\x0e\xcf\xd1\xdd\xd8\x0a\x4d\x11\x32\x1c\x26\x09\x1b\x0e\xae\x43\x87\x03\x7a\x78\x01\xd5\x5b\x14\x68\x7a\x24\x27\x5e\xa3\x30\x91\xe7\x28\x60\xbc\x47\x11\x32\xb3\xf8\xd9\x3d\x48\x01\xe3\x45\x8a\x90\x79\xc7\x0d\x3d\x49\x71\x32\x6b\xaa\x82\x5e\x6f\x52\xa4\xc4\xca\xef\xb4\xd7\xa3\x14\x33\x2c\x3a\xbe\xa7\x03\x5e\xa5\x98\xa1\xa1\xf5\x3f\xed\xf5\x2c\x45\x48\xee\xf3\x41\xed\x78\x97\xa2\x6a\xdb\xeb\x87\x4a\xbc\xd2\x77\xdb\x00\xa5\x6b\xa7\xfb\xa3\x82\xa3\xf5\x8b\xe8\x97\x0a\xcf\xbc\x73\x5a\xb8\xa8\x02\xce\x4d\x15\xd1\xf1\x35\x24\x6a\xd8\x55\x15\x33\x98\x6a\x4e\xad\x83\xee\xaa\x08\x99\x7a\xc7\x56\xad\xcb\x2a\x42\x6a\xbf\x73\x6b\xd3\x6d\x15\xb5\xe2\x8f\x3a\xb8\xe2\xb5\xb9\x20\x13\x9f\x8e\x39\xb9\xa2\x35\xba\x60\xae\xd5\x45\xae\x50\xae\x9d\x5d\x01\xe1\xf0\x8a\x10\x59\x9d\xf4\x06\x9d\x5e\x51\x1f\xcf\xfa\x1d\x9d\x97\x68\x07\x07\x59\xc9\x8e\x8b\xac\x81\xf3\x2b\x6a\xc4\x4e\xa2\x7c\x06\x73\x05\x34\x6e\x70\x0d\xb8\xcb\x36\x1d\x61\x31\x67\x8b\x21\x97\x59\xbd\x33\x2c\x42\x7a\x8f\xdb\xac\x4e\x23\x8d\xe9\xb7\x21\xd7\xd9\x86\x53\x2c\x66\x92\x19\xb8\xcf\xd6\x1d\x63\xf1\xa2\x1d\xbb\xd0\x02\xc6\x8d\x16\x2f\xb3\xe9\x70\xab\x77\xa5\xb5\x92\xfa\xe3\x2b\x9d\xd4\x57\x14\xa9\x1a\xc7\x5b\x4b\x97\x5a\x40\x59\x3c\x80\xe4\x5a\x0b\x24\xf7\x5a\xa0\xb8\xd8\x02\xc5\xcd\x16\x28\xae\xb6\x40\x74\xb7\x05\x9a\xcb\x6d\xb7\x98\xb9\x56\x5f\x53\xd6\xdc\x98\x00\x76\xee\xb7\x60\xe7\x82\xdb\x5b\xdc\x4c\xcf\xdf\x57\xdc\xd0\xc0\xd0\x57\xdc\xd0\xca\x00\x0e\x5c
\x72\xc1\xd6\x2d\x17\x68\xae\xb9\x40\x70\xcf\x05\x17\x2e\xba\x80\x72\xd3\x45\x2c\xa1\x8f\xf2\x5c\x61\xe0\xaa\x8b\x90\xa9\x75\xea\xed\xba\xeb\xd2\x6e\x56\x6d\xc7\xde\x86\xcb\x2e\xe6\x62\x35\xe6\xdc\x5b\x73\xdb\x45\x88\x1d\x73\xf0\xad\x5c\x77\x91\x37\x0b\x77\x4e\xbe\x80\x70\xf4\xc5\x1c\xf6\xa4\x4b\xf0\x64\xce\xbe\x80\x71\xf8\xc5\x1c\xd5\x33\x84\xd3\x2f\x42\x6e\xe1\xb6\x3a\xe6\xf8\x8b\xb9\xb2\x8e\xb9\x08\x93\xbc\x70\x41\x5d\x85\xc6\xdc\x84\x2b\x07\x60\x84\xdc\x71\x57\xe1\xc2\x09\x18\x39\x1b\xc6\xdd\x85\x95\x23\x30\xf6\x86\xe5\xce\x65\x18\xa6\x76\x1b\x06\xb7\xae\xc3\x60\xe7\x3e\x0c\x56\x2e\xc4\x80\x75\x23\x06\xaa\x0a\x95\xee\x4e\x0c\x64\x97\x62\x72\x65\x5d\xb8\x16\xc3\x33\x6b\x7b\xc9\x5e\xc6\x80\xf2\x34\xc6\x58\x64\x92\x62\x77\x37\xf0\x36\xc6\xda\xb8\x26\xf0\x38\x06\x84\xd7\x31\x6e\x8d\x37\xf7\x3c\x46\xc8\xcd\x7d\x94\xc7\xbc\x8f\xb1\x8a\xbf\xd2\x4f\x79\xd0\x03\x19\x21\xb5\xed\xab\x3c\xe4\x85\x4c\xed\xa9\x7a\x70\x86\xa6\x27\x32\xce\x64\xdb\xaf\xe1\x6e\xe9\xad\x31\x3a\x7f\x13\xbf\x65\x88\x31\xdb\xbb\xa1\xef\x32\x4a\x2b\xcf\xa2\x0d\x37\xf3\x5f\xc6\xd9\x3b\x5a\x3b\x7b\x9f\x0f\x33\xd2\xd2\x9b\xeb\xa3\x87\x55\xc9\x08\x99\xee\xa3\x2e\x80\x79\xe4\x05\xac\x66\x57\xe7\xf9\xac\xf7\x69\x46\x6a\xa4\xfb\x22\x30\x50\x82\x2f\x80\x41\x00\x86\xc9\x74\x82\x48\x4f\x68\x20\x79\x43\x03\xc5\x23\x1a\x2c\xbc\xa2\x81\xee\x19\x0d\x04\xef\x68\x20\x78\x48\x03\xc5\x4b\x1a\x48\x9e\xd2\x40\xf0\x96\x06\x9a\xc7\x74\xa7\x82\x58\x8d\x23\xcd\x73\x1a\xac\xbc\xa7\xc1\xca\x83\xba\xaf\x34\x46\xd7\x48\xf5\xa4\xee\x29\x8d\xd2\x73\xda\x78\x54\x6b\xde\x6f\x16\x7c\x1f\x10\x01\xf8\xc1\x01\xf3\x68\x18\x52\x1d\xbe\x95\x60\xfc\x80\x0c\xc8\x6f\x28\x52\x6c\x5d\xe8\xa0\xfc\xd1\x61\x27\x8e\x8c\x53\x7b\x51\x5f\x16\xaf\x31\x2f\xe8\xf4\xb2\x68\xfe\xfe\x8e\x9a\xbb\x6c\x21\xef\x3f\xdd\x7e\xbe\x49\xff\xe9\xb2\xab\xbd\xf7\xb4\xf7\x9e\xee\x3e\xde\x7b\x7a\xf4\xf1\xde\xd3\xde\x7b\xda\x7b\x4f\x0f\xbd\xf8\xdf\xd1\x7b\x5a\xb7\x6f\x7a\xdf\x69\xef\x3b\xed\x7d\xa7\xbd\xef\xb4\xf7\x9d\xf6\xbe\xd3\xde\x77\xda\xfb\x4e\x7b\xdf\xe9\x6e\x29\xef
\x3b\xed\x7d\xa7\xbd\xef\xb4\xf7\x9d\x1e\x7a\xbc\xef\xb4\xf7\x9d\xf6\xbe\xd3\xde\x77\xda\xfb\x4e\x7b\xdf\xe9\xfc\xf1\xbe\xd3\xde\x77\x1a\x23\x67\x72\x5d\xaf\xf7\x9c\xf6\x9e\xd3\xde\x73\xda\x7b\x4e\x7b\xcf\x69\xef\x39\xed\x3d\xa7\xbd\xe7\x74\x7f\x51\xef\x39\xed\x3d\xa7\xc9\xa5\xbd\xe7\xf4\xc8\xe3\x3d\xa7\x47\xbe\xf9\x1b\xf1\x9c\xde\x07\xab\xcf\xed\xcb\xd5\xb4\x4e\xd4\x57\x9a\x37\x9a\xcb\x70\x7a\x81\x24\x55\xa5\x39\x84\x58\xf4\x59\x0c\x98\x3b\x26\xdd\x79\xea\xde\xac\x52\xda\xf8\xde\x8e\x73\x58\x5d\x07\x09\x5f\x61\x7d\xa1\xe9\x33\xeb\xb4\x78\x1d\xae\xf0\xe4\x9d\x54\xd6\x8b\x98\xa7\xfd\xbd\x4a\xb4\x74\x48\x79\xee\x16\xad\x7a\x31\x8b\x21\xe5\xa1\xa9\xdf\x38\x74\xd5\xea\x7b\x96\x8a\x3f\x25\xf1\x61\xb3\x95\x92\xf3\x51\x50\x7a\x09\x18\x0a\x2e\x7c\x09\x4e\xcf\xae\xe7\x6a\xbf\xfb\x70\x79\x73\x35\x7f\x7b\xf6\xee\x6c\x7e\x7a\x04\xcb\xc5\xd5\x11\xbc\x59\x2c\x97\x8b\x0b\xf7\xd9\x6d\xb5\xef\x34\x2c\xbb\x5c\x5c\x19\xfe\x52\xd5\xde\xe0\xc7\xd1\x61\x57\xf5\x3e\x66\xd4\x07\x51\xc6\x37\x86\xdb\x82\xb8\x31\xb1\x4c\x96\xf9\xf3\x9f\x28\x13\xe5\xb2\x5e\x4b\xfc\x80\x14\xe3\x71\xc7\xa2\xa7\xe6\x88\x92\x17\x71\x8c\xba\xa9\x6f\xf8\x19\x14\xcf\xa7\xc0\x05\xcf\xb6\xa6\x3e\xf0\xb6\x8b\xcb\x75\xfd\x95\xdf\xd8\x02\xd3\xa8\x1b\x71\x91\xb9\xcd\xdb\xf4\xd3\x4e\x4a\xb9\xd5\x58\x8f\x08\xeb\x4c\x10\x29\xd5\x0b\x8f\xb2\xf0\x49\xd9\x5c\x63\x69\x2a\x2a\x3d\x3d\x0d\x85\x3e\x6e\x83\xd5\x56\x3a\xc0\x8b\x8b\x6d\xcd\x1d\x75\xcf\x72\xbf\x51\x42\x4a\xd5\xb8\x70\x11\xaf\xab\x3a\x3a\x2e\x52\x17\xf3\xe5\xfb\xc5\x69\x63\x4d\x33\x7c\x41\x5e\x52\x79\x49\x15\xff\x33\xfb\xa5\xfa\xf3\x59\xf5\xf7\x08\x2f\xa9\xbc\x84\xb8\x82\xde\x2c\xdd\x2f\xa9\xdd\xef\xc5\x15\x44\xa4\x28\xaa\xda\x04\x59\xc0\x38\xcb\x52\xd5\xba\xb8\x02\xaa\x71\x47\xca\xa4\x9a\x94\x8b\xd3\x1e\x47\x75\x49\x1e\xbf\xd2\x71\x94\x54\x95\xa6\xcd\x62\xbf\x0f\x9f\x80\xe5\xcd\xd8\x70\x26\x60\xf7\x99\x44\x37\x4c\x96\x07\x95\x21\x27\x48\xe5\x6a\x70\xc8\x4c\x54\x54\x48\xa8\x8f\xc2\x5c\xd9\xee\x37\x14\xd6\x6a\x0a\xce\xca\x98\xb1\x32\xdf\xf8\x9f\x9b\xaf\x32\x66\xab\xcc\x99
\x08\x33\xae\x0a\x6f\xd2\x18\x62\xaa\x5a\xa4\x94\xa1\xc4\x3e\x9e\x4a\x43\x49\x19\x4a\x1c\x84\x89\x74\x84\x94\xa1\xdc\x3e\x8e\xaa\x97\x8e\x32\xed\xfe\x16\x43\x35\x44\x46\x99\x36\x6a\xcb\x40\x40\x76\x24\xa0\x12\x51\xb6\xab\x0b\x91\x84\x7a\xbe\x9d\xc4\x82\x82\x42\x10\x50\x18\x38\xd4\x84\x7e\x6a\x30\x4d\xc6\x27\xdb\x7e\xf2\xa9\x87\x67\x32\x9e\xfa\x5d\xea\x69\x80\x65\x42\xdd\xf6\x8b\x3a\x19\x70\x4c\xa6\xd3\xbf\xb2\x06\x8f\x33\x4c\x86\x32\xfb\x2c\xc1\x5a\xfb\x2e\x42\x71\x39\x05\xbb\x64\xca\x2d\xa1\xdd\xeb\x06\x99\xa5\x16\x89\x84\xbf\xe2\xf5\x66\x06\x2e\x28\x24\xd3\x66\x1d\x65\x95\x4a\xc3\xb1\xf9\x84\x35\xe1\x94\x5c\x24\xf3\x37\x31\x18\x63\x2c\xe6\x0d\x3e\x69\x98\x3a\x32\x94\xa8\x65\x93\xfa\x88\x23\xd3\x19\x3b\x62\x29\xee\x5c\xa5\x4d\x67\x42\x8d\x49\x32\x22\x8d\x70\x72\xc5\x4d\x7b\x22\xca\xc8\x98\x30\xd2\x73\x43\xb8\xb7\x34\xe9\x22\x3d\x33\x44\x96\xf8\xea\xc7\x6e\xcd\x2b\x5e\x08\x27\x96\x48\x15\x61\x74\x16\x58\x9a\x08\x4f\x12\xa1\x29\x22\x34\x41\x84\xa6\x87\x28\xe4\x10\x81\x1a\xa2\x12\x43\x64\x5a\xc8\x82\x14\xb2\xa0\x84\x2c\x08\x21\x0b\x3a\xc8\x82\x0c\xb2\xa5\x82\xac\x88\x20\x02\x0d\x84\x25\x81\xac\x29\x20\x73\x02\x48\x72\x3d\xa6\xc7\x29\x03\xfa\x47\xcf\xf4\x98\xea\x16\xda\xe4\xcf\x10\xcf\x83\x51\x81\xe4\xd4\xcf\x38\xcb\x83\x3d\xb1\x9f\x8c\x72\x3c\x18\x3f\x83\x26\xa0\xe0\x84\xe1\x31\xe5\x77\x72\x2a\xc7\xf4\xfb\x27\x62\x77\x8c\xb9\x1d\x38\x33\x3d\x50\x9a\x33\x3b\xa5\x27\x87\x69\x97\x0d\xf3\x3a\xe3\x14\x8e\xe1\x7b\x9a\x18\x8c\x01\x81\x63\x7a\xef\x28\x38\x1d\x03\xfa\x06\x71\x43\x2c\xee\xd8\xe3\xe4\x0d\xa2\x9d\x8f\x5c\x53\x37\x93\x12\x37\x0e\x69\x1b\x0b\xd2\x86\x4e\xd9\xa0\x08\x1b\x02\xb0\x42\x25\x6b\x68\x54\x8d\xad\xc6\x90\x44\xd3\x3c\x9f\xbe\x90\x4c\xd2\x98\x53\x34\x8a\x8d\x31\xde\xc9\x8c\x08\x1a\xcc\x64\x9d\x80\x9e\x31\x25\x67\x20\x30\x3d\x13\x98\x53\x33\x05\x0b\x63\xac\xcd\x19\x24\x66\x5a\x1c\x8c\xa1\xd0\x21\x5a\xa6\xc3\xc0\x20\x7a\xbe\x87\x94\xe9\xe5\x5f\x0c\x45\x37\x29\x19\x27\xb1\x9d\x0c\xe3\x3a\x51\x58\x92\x21\x9d\x68\x93\x79\xc9\x49\x16\x44\x03\x8f\xf2\x2e\xf9\xdb\x1d
\xea\x33\x2b\x82\xc5\xf8\xcc\x3d\x49\xc0\x24\xc3\x60\x49\x98\xc0\x7e\x66\x81\x92\x0a\x6a\xc5\x74\x6e\xe9\xd8\x96\x1e\x62\xc5\x74\xfc\x77\xb8\x16\x37\xb4\x0a\x46\xd3\x84\xa4\x54\xf0\x84\x0a\x9a\x4e\xa1\x92\x29\x44\x2a\x05\x4b\xa4\x60\x69\x14\x34\x89\x82\xa7\x50\xb0\x04\x0a\x81\x3e\x21\x92\x27\x54\xea\x84\x4e\x9c\xd0\x69\x13\x3a\x69\x42\xa7\x4c\xe8\x84\x89\x1d\x5d\x82\x26\x4b\xb2\xe6\x39\xf6\x9c\x45\x9b\x03\xdb\xf0\xe1\x55\xc6\xf8\x8c\xde\x3a\x9b\x2f\xf5\x2f\x1b\x14\xd1\x3c\xf7\x2a\xfc\xa2\xf4\xab\xbc\xe7\xd9\x6a\x6b\x6a\xc4\x3c\x44\x41\xb6\x78\xe0\x49\x12\xac\x27\xfa\xbe\x0f\xb5\x37\x98\x7f\x94\x38\x0a\x89\xba\x89\xa3\x80\xdc\xc6\x73\x55\x84\x52\x1f\xc8\x4f\xcc\x29\x88\x91\x21\x50\xbb\x10\x47\x71\xa4\x6e\xa0\xf9\x75\x56\xca\x97\xae\x43\x71\x5e\x3f\x60\xd1\x93\xfc\xeb\x11\xa1\x52\xd5\xc5\x56\xd2\x83\x35\x92\x08\x46\x5e\x21\xe5\xfb\x23\x2a\xaf\xfc\xbc\xc5\x76\x56\x28\x0d\x46\x64\x32\xf5\xcb\xdf\x6e\xc5\xfb\x6f\xcd\x81\xa0\x84\xdf\x7f\x7a\xf8\xe1\x65\xc2\xd3\xec\xe5\xc3\x0f\x2f\x0b\x2c\xe7\x44\x1d\xb2\x4e\xf3\x46\x8d\xc7\x79\xa1\xfc\x76\x1f\xc1\xed\x45\xab\xa8\xd6\x50\x99\xf1\x2f\x99\x7e\xc4\x8c\xfa\x48\xb6\x67\x00\xff\x32\xf2\xbb\xb1\x1b\x68\xaf\x84\xd6\x44\x49\xd8\x63\x3e\x82\xc5\xb1\x73\xc7\x92\xcf\xeb\xf8\x31\x82\x75\x90\xee\x43\xa6\x94\xa9\xfc\x4b\x26\x8e\x3e\x62\xb8\x89\x4b\xda\x40\xbd\x56\x71\x74\x1f\x06\xab\xac\x47\xcd\x70\x0c\x5f\x9e\xde\x6e\x59\xa2\xaf\xd8\x31\xa4\xc5\xad\xb9\xe7\xdf\xef\x42\x16\x7d\xee\xf9\xb7\x30\xde\xa4\x57\x2c\xe2\xfa\x50\x32\xe3\x0e\x92\xf9\xd7\xf5\x4f\x78\xa3\xc9\xde\x76\x4b\x52\x42\x7b\x7f\xde\x99\xdf\xa2\xad\x8b\xaa\xe4\xb7\xcc\xbc\x27\xfa\xcf\x7e\x39\x3e\xe1\xb4\xe2\xef\xa4\xcc\xf1\x5f\x9b\x8c\x42\x25\x6b\x50\x81\xa1\xe1\x83\xda\x8d\x11\xa4\xf9\x87\x66\x75\x4b\xfc\xc0\xfc\x2d\xdc\xdd\xdf\x2d\xae\x2f\x66\xcb\x26\xc2\x73\x31\xbb\xfe\xf9\x74\xf1\xf1\xf2\x08\xae\x67\x1f\xfb\xad\xb7\xc3\xe7\xe8\x63\x8d\xe8\x81\x1f\x17\xef\x1c\xf8\xc9\xf5\xec\xa3\x6e\x55\x09\xb2\xb0\x67\x23\x1a\xed\xda\xce\xc6\x9a\xf5\xb4\x59\xa3\x07\x16\xf2\xbf\x2c\x54\xcb
\xb6\x7c\x7f\x71\xfb\x7d\x94\xfd\xaa\x1b\x8a\xf9\xd4\x76\xb3\xfe\xfd\x32\xb0\x4e\x18\x0f\xbe\x21\x21\xad\x55\x70\x25\x7e\xd8\x32\x47\x0c\xe4\xbd\x30\x5c\xed\xc6\xd6\xb3\xac\x6f\x8d\xb6\x59\xea\x86\x73\xc6\x1c\xcb\xaf\xba\xe9\x0b\x96\x67\xb0\x4e\x8a\x96\x52\x03\x64\x40\x99\x6c\xc4\x04\xb4\x17\xcb\x9a\x64\xfb\x95\x27\xef\x7c\x23\xa1\x8d\xc1\x70\xaa\xd6\x5b\x88\x55\x21\xa9\x8b\x14\x23\x5f\x7e\x78\xff\xb9\xd1\xcc\x05\x7f\x17\x4f\x75\xa6\xbc\x88\x47\xce\x92\x36\x2d\x27\x84\x8f\x6a\xa0\xbb\x7e\xaa\x6a\x56\x89\x6f\xd6\x61\x48\x8b\xd3\x39\x06\x42\x7a\xbb\x38\x5f\x5c\x1f\xc1\x2f\x9f\xae\x67\xbf\x1e\xc1\xcd\x72\xb6\xbc\x19\xf6\xbb\x19\xd7\x82\x1c\x77\x2a\x31\xf2\x73\x59\x87\x91\xdf\xc8\x0a\x8e\xfc\x46\xd6\xbe\xe7\x37\xc5\xfc\x1c\x9b\x5c\xc3\x8e\x07\x6d\x94\x78\x68\xd2\xc3\x30\x9a\x2f\xef\x1a\xe5\x29\x44\x39\xca\x06\xe9\xd8\x7c\x40\x9a\xb8\x70\x66\xad\x51\x53\x96\x21\x93\x84\x9d\x13\xa3\xcd\x68\x96\xb2\xeb\xb8\x7d\x9d\x1e\xf8\xad\x29\xda\x13\xf2\x0d\x8f\xd6\x4b\xbe\xdb\x87\x2c\x1b\x59\x63\x50\xd6\xb3\xd6\x48\x3a\x6f\xbc\x67\xa4\x6c\x73\x49\x98\x49\x93\x8f\x28\x56\xdc\x38\xc4\xda\x1a\xb1\x9d\xf8\xe3\x6d\x75\xe9\x1f\x57\x9f\x76\x12\x62\xc8\x31\x9a\x7b\x2b\x04\x29\xa4\xdb\xf8\x10\xae\xa5\x8a\xdb\xd4\x44\x28\x4d\x24\x92\x0a\xda\xc7\x61\xee\xb7\x9d\x9f\x79\xa4\xa6\xfb\xf6\xff\xfb\x47\xc8\xee\x78\xf8\x49\x34\xc3\x3f\x6f\x8f\x14\xc1\x69\x20\x56\x3a\x8e\xa7\x71\xf8\xc0\x0b\x1a\x46\xca\x79\xf1\x22\x55\x4b\xe1\x09\x0c\x2f\x63\xbb\x20\x9a\xe1\x80\x2d\x7a\xe7\x5e\x74\xde\x85\xe9\xe0\xe6\xb1\x31\x8c\x1f\x79\x02\x77\xf1\x41\x99\x61\x2a\x7b\xd6\x68\x9b\xdd\x8b\x39\xc4\xa3\xd5\x53\xbe\xf7\x06\x69\xd9\xbf\x47\xd2\x15\x83\x8b\xfa\xf1\x35\xdc\x3d\xe5\xaa\xf3\x27\x93\x1e\x96\x41\x3e\x82\x28\xd8\x1d\x76\x35\xa3\x95\xd2\xd8\x17\xf1\x08\xa4\x05\x0e\xe3\xe7\xa3\xf4\x4c\xef\xe2\x04\xf8\x17\x26\xaa\x76\x04\xc1\x7d\x0d\x05\x4b\x61\x7f\xb8\x0b\x83\x74\xcb\x45\x2b\xac\x38\xf0\x87\xe1\xf9\xae\x9e\xef\x5f\x89\xaa\x1e\x32\x9e\x1e\xe5\xf6\xa1\x20\xfa\xa4\x81\xdf\xaa\x71\x3e\xde\xac\x0a\xb4\xaa\x24\x9f\xc0\x59
\x06\x8f\x52\x40\x14\x67\xb0\x63\x9f\xc5\x57\x47\x29\x2f\x35\x66\xa3\x22\x95\x67\x48\xb0\xc9\xbb\x97\x65\xd2\x71\x48\xc9\x2f\x11\xbb\x11\x62\x60\x1f\xc6\xd9\x52\x8c\xd8\xc9\x46\xf5\x55\xfe\x06\xd3\x52\xc8\xcd\xa0\x10\x6f\x60\xa2\xd7\xdd\x73\xcb\xd1\x5d\xf6\xa5\x68\x91\x8c\x17\xc6\xcb\xd1\x4e\x50\x1b\x71\xf7\x80\x75\x75\xbe\x58\x7e\x5a\xfe\x7a\xd5\x3c\x65\xc1\xf9\x99\x49\x6a\x8c\x9b\xe5\xec\xed\xcf\xf3\xd3\x4f\xb3\xeb\xf9\xec\xa8\xfc\xbf\x37\xb3\xeb\x23\x78\x3f\x9f\x2d\x2f\x66\x57\x63\x4e\xcf\x26\x46\xa8\x63\x7d\x25\x47\x4b\x89\x6f\x18\xfd\x51\xfd\x13\x8c\x7f\xfc\x66\x36\x66\x35\x3a\x2e\x1a\x60\xf8\x18\xdf\xdc\xe0\xcd\x06\xb7\x11\x43\x3d\xac\x2b\x9f\x68\x8c\xe3\xde\xd2\x73\x98\xcc\xa3\x54\x8b\x45\x5d\x2e\x9b\x52\xb9\x88\x58\x6b\xe5\x34\xb9\x4f\xe2\x9d\x5c\x12\x6f\x32\xb6\xfa\xbc\x4e\x82\x07\x9e\xe4\xb1\x9b\x52\x98\x5d\x9d\x8d\x86\x5c\x42\xa0\xd2\x19\x3a\x34\x13\x3a\x01\x6a\x6f\x6f\x1a\x87\xd5\x72\xd4\xa5\x84\x37\x37\xba\x59\x95\x82\x3d\x4b\xd8\x8e\x67\x3c\x49\xd1\x76\x97\xe2\x31\x4f\x7a\x8b\x48\x7b\x8b\xcf\x4d\x8a\x4e\x7d\x4b\x48\x7e\x6b\x9d\xfe\x76\xb2\xee\xc7\x57\x04\x93\xfa\xd6\x50\x24\x94\x87\x28\x83\xe4\xb7\xc6\x32\x8b\x24\xb9\x06\xe9\x6f\x8d\x65\xe2\xd2\xe4\x52\x12\xe5\x5a\xa4\xca\xb5\x8f\x95\x6e\x97\x2e\x77\xba\x84\xb9\xd3\xa4\xcc\xfd\x6a\x49\x73\xa7\x49\x9b\x6b\x98\x38\x17\x25\xb1\x4b\x34\xeb\xc2\x3c\xa0\x44\x6a\x42\x42\xb8\x48\x9e\x3b\x5d\xfa\x5c\xc3\x04\xba\x28\x89\xe3\x41\x22\xb0\x12\xcd\x42\x45\xe0\x06\x69\x17\x10\xb6\xcc\x3c\x61\x93\x48\xd7\xd5\xea\x66\x91\x4c\xf7\x39\x76\x61\xcb\x34\xba\xe6\x61\x24\xb0\x2b\xcb\x78\x2a\x5d\xdc\x78\xed\x06\x9e\xd0\x84\x94\x40\x89\x1c\x0b\x3f\x81\xcf\xfe\x00\xae\x83\x50\xa8\xc7\x28\x14\x05\x72\xed\xd3\xa5\xdf\xa5\xc5\xf2\x51\x8f\x51\x82\x02\x94\xc4\x31\x57\x6d\x8b\xd0\xff\x23\x4e\xdb\x65\x88\x0a\xf4\xde\xe7\x30\x9c\x85\x7a\xcc\x82\x5a\x20\xcf\x12\x83\xa9\x78\x49\x69\x0f\x60\xa2\x64\xbc\x86\xde\xe5\xc8\xbd\x74\x3c\x24\x86\xc5\x6e\xea\x24\x30\x86\x7a\xdc\x87\xc7\x50\xcf\x14\x41\x32\xd4\x63\x1c\x2a\x83\xb0\xfe\xbd\x36\x0b\x98
\x81\x92\xdc\x93\xc6\xb7\x15\x36\x83\x22\xb2\x37\xc4\x86\x2e\x78\x06\xe5\x05\x0e\x03\x6d\x34\xe4\x3a\x0c\xb7\xd1\x27\xd7\x49\xd0\x8d\x86\x70\x8b\x84\xbe\xd8\x04\x1e\xd4\xa4\xbe\xd4\xb4\xbe\xc4\xc4\xbe\xc4\xd4\xbe\xc4\xe4\xbe\xf4\xf4\xbe\xe4\x04\xbf\x76\x29\x7e\x2d\x93\xfc\x5a\xa7\xf9\xb5\x4e\xf4\x6b\x9d\xea\xd7\x3a\xd9\xaf\x75\xba\x5f\x37\x09\x7f\x1d\xa4\xfc\x25\x27\xfd\xa5\xa5\xfd\x75\x94\xf8\x77\xa2\xd4\xbf\xd3\x24\xff\x9d\x22\xfd\xef\x24\x09\x80\x27\x4b\x01\x6c\x98\x04\x18\x25\x71\x82\x10\x22\xea\x31\x0b\x24\x82\xd4\xc8\x3e\x43\x2a\xe0\x69\x92\x01\x4f\x97\x0e\x78\x92\x84\xc0\x86\x29\x81\x69\x17\x1c\x87\x21\x49\xd4\x33\x41\x60\x92\x5c\xb0\x49\x78\x12\x7a\x5f\xf5\x07\x29\xc1\xd5\xb2\x19\xd0\x64\x30\x54\x09\x6e\xc8\x0e\xf1\xd3\xf5\x80\x25\x16\xaa\x6d\xb4\x42\xd7\x3a\x99\xb0\x6d\x3a\x61\x42\x42\x61\xb2\x22\xd9\x2e\xa9\xb0\x4d\x5a\x61\x57\xba\x6f\x72\x6a\xe1\xe7\xd0\x7c\x5b\x25\x15\x36\x0e\x88\x82\x55\x82\x9a\x25\x16\xc6\xad\xbd\x35\xa5\x79\x7f\x70\x14\xdc\xfe\xde\x0e\xa4\xa2\x0d\x91\x82\x56\x29\xeb\x3e\x5b\xa7\x6b\x42\x37\x6a\x5f\x50\x95\x2a\x5c\x0a\x76\x21\x1f\x0d\xad\x82\x49\x05\xa3\x1e\xa3\x00\x2b\xb8\x36\x1d\x4c\x48\xdc\xec\x33\x9c\x9e\xae\x2f\x25\x71\x5d\x8f\x4f\x18\xa4\xfd\x49\x89\x4d\xbd\x16\xeb\xcf\x68\x5a\x62\xb4\x44\xe7\x41\x5a\xd4\xe3\x3a\x35\xb1\x61\x72\x62\x9a\x1d\xdd\x75\x7a\xe2\xa9\x12\x14\x4f\x91\xa2\x78\xca\x24\xc5\xd3\xa4\x29\x76\x98\xa8\x98\xa2\xe9\x24\x24\x2b\xa6\xa6\x2b\x26\x26\x2c\xb6\x4b\x59\x6c\x95\xb4\x98\x96\xb6\x98\x96\xb8\x98\x98\xba\x98\x9a\xbc\x98\x96\xbe\x98\x9c\xc0\xd8\x2a\x85\xb1\x5d\x12\x63\xdb\x34\xc6\xb6\x89\x8c\x6d\x53\x19\xdb\x26\x33\xb6\x4d\x67\xec\x22\xa1\xb1\x45\x4a\x63\x5c\x52\x63\xbb\x00\x96\xe8\xc4\xc6\xdf\x50\x6a\xe3\x49\x92\x1b\x13\xd3\x1b\xab\x87\x96\xe4\x38\xff\x16\x6b\x8f\x6a\x52\x96\xe1\x96\x24\xb7\x57\x5b\x8b\x1a\x39\xce\x7e\xac\x1e\x8a\x2f\x32\x32\x13\xb2\x7a\xdc\xe8\x2a\x70\x59\x91\x5b\x22\xa6\xef\x49\x7c\x72\x64\xf5\x18\xa6\x48\x46\x9e\xd7\x43\xbe\xca\xc6\x13\x25\x23\x4d\x06\xcd\xa4\xca\x23\xe9\x92\x71\xb6\x23\x6c
\x6a\x65\xf5\xe0\x8f\xbb\xf4\x34\xcb\x85\x04\xd3\x64\xcb\xc5\xef\x8d\x53\x2e\xab\x87\x90\x78\x59\x3d\xf8\xf4\xcb\xea\xc1\x27\x61\x56\x8f\x75\x2a\x66\xf5\x98\x27\x64\xc6\xea\x94\x2c\xd2\x32\xab\x87\x90\x9c\x39\x7f\xb9\x93\xf5\x0e\x9f\xa8\xb9\x25\x66\xfa\x35\x8f\x96\xaf\x59\x3d\xc6\x59\x9b\xb1\xfd\x2e\x4d\x82\x26\xb9\x9b\xd1\x1a\x20\x65\x7f\x1d\xcc\xe0\x8c\x96\xd9\x1a\xe9\xdd\x3c\xce\x28\x89\x26\x39\x9f\x1b\xd9\x9c\x51\xd2\x0d\x32\x3f\x53\x72\x3a\xab\x07\xbf\x96\x13\xf3\x3b\xb7\x8a\xa3\x6f\xdb\xc8\x5c\xcf\xed\x62\x48\x45\x02\x32\xef\x73\xab\x98\x51\xf6\x67\xf5\xd0\x72\x40\xab\xc7\xfe\xb4\x4e\x4a\xc2\xdc\x92\xe4\x76\xbd\xb3\xa8\x11\x2a\x39\xb4\x71\xb7\xde\xd6\x8a\xd6\xd7\x47\x8f\x1e\x6a\x1f\x8f\x1e\x1a\x3e\x1e\x3d\xf4\xe8\xa1\x47\x0f\x3d\x7a\x38\xb0\xef\x79\x06\xd1\x33\x88\x9e\x41\xf4\x0c\xa2\xf9\xe3\x19\x44\xcf\x20\x7a\x06\x51\x3d\x9e\x41\xec\xc8\xf5\x0c\xe2\xd8\xe3\x19\xc4\xc1\x62\x9e\x41\xc4\x09\xf0\x0c\xa2\x67\x10\x3d\x83\xe8\x19\x44\xcf\x20\x7a\x06\xd1\x33\x88\x9e\x41\xf4\x0c\xa2\x81\x00\xcf\x20\x4e\x52\xe5\x6f\x9e\x41\x74\x9a\x97\x5d\x3d\x1e\x46\xf4\x30\xa2\x87\x11\x3d\x8c\xe8\x61\x44\x0f\x23\x7a\x18\x51\xff\x78\x18\xd1\xb0\xb0\x87\x11\xdb\xa5\x3c\x8c\x68\x55\xde\xc3\x88\x46\x30\x62\x3b\x2d\xcc\xb5\x38\xd4\x3c\x7f\x6e\x18\xf9\x5a\x8c\x18\xb7\xb7\x23\xd3\xd7\x37\xae\x3c\x57\x9a\xf4\x30\x0c\xe4\xa9\x10\xee\x78\xf6\xc8\xb9\x99\x1f\x44\xf6\x18\x6b\x88\x37\x23\xad\x05\xd6\xff\x75\xcd\xa3\x78\x17\x44\x2c\x8b\x9f\x91\x5a\x3c\xad\x5e\x8a\x15\x30\x41\x2f\xe3\x6b\xd3\xb1\x16\xd4\x5a\xb1\x38\x66\xca\x5e\x37\x3d\x48\x60\x92\x02\x01\x2e\x31\x10\x50\x9d\xa2\xf1\x0e\xf2\x40\x1b\x1c\xe0\x22\x51\x10\x3c\xdf\x20\xa1\x55\x6e\x92\x04\x42\x30\x51\x12\x21\x40\x24\x12\xa2\xdc\xc1\x30\xc9\x84\x80\x38\x80\xc1\xce\xb3\x1f\x6c\xb4\x95\x0e\x3d\xfc\xc1\xcc\xcb\x1f\x29\xb1\x32\x43\xa4\x83\x9e\xfe\x68\xb1\x75\x32\x40\xef\xed\x8f\x16\xf9\x0c\x74\x00\x18\x12\x02\x68\xa1\x05\x40\xd7\x4f\x09\x10\x9a\x43\x79\x99\x8f\x91\x02\x68\xc1\xca\xe8\x39\x44\x0b\xa0\x45\xb6\xe8\x02\x0d\x31\x80\x16\xd9\x4f\x18
\xd4\xd6\x3c\x7c\xe7\xf7\x52\x06\x24\xa5\x80\x7a\x06\x48\x03\x2a\x18\x01\x52\xe1\xd6\x4b\x1b\x58\x4a\x76\x41\x1c\x80\x35\x75\x00\x4e\x57\x5e\x2b\xfa\x00\xbe\xca\xa9\xc2\x1a\x44\x00\x53\x18\x01\x3f\x42\xe2\x11\x20\x81\xb6\xec\xb2\x61\x28\x81\xb8\x94\xc5\xf7\xa3\x60\x02\x7e\xdb\x2c\x3c\x36\x86\xe1\x04\xe2\xaa\xbe\x9e\x00\x50\x80\x49\x20\x05\x98\x04\x54\x00\x33\x58\x01\xbf\x69\x74\x6d\x1c\x4d\xbb\x05\x71\x1c\xd4\x00\x87\x0e\xb4\x40\x1d\x5a\x6d\xc8\xa1\xd9\x04\x94\xdd\x42\x0b\x3a\xd4\xc6\x16\xf9\xe3\xf5\xb0\x83\x02\x18\xd0\x42\xdd\x03\x0f\x30\x09\xf4\x00\x86\xe0\x03\xe5\x0c\xe2\xdc\x34\x03\x3a\xf3\x4c\x07\x80\xb0\x3a\x84\x9d\xe8\x21\x08\x5a\x35\xbb\xd0\x44\xcd\x56\x83\x5f\xf6\x06\xc1\x89\x02\x86\x40\x8b\x75\x0f\x4f\xc0\x34\x00\x05\x3c\x07\x44\x01\x13\x82\x14\x30\x21\x4c\xa1\x97\xed\x10\xa8\x00\x47\x50\x05\x10\xac\x8c\x60\x01\x57\x80\x05\x60\x01\x74\xc8\x02\xe8\xa0\x05\xd0\x61\x0b\xb0\x02\x2e\xc0\x06\xba\xe8\x16\xc6\x5a\xc9\x34\x12\xb0\x86\x3a\x70\x01\x60\x80\x0b\x08\xa3\x57\x08\xc6\x6e\xd6\x27\x04\x65\xbc\xeb\x13\x82\xb2\xe0\x81\x33\x28\x03\xdc\x80\x19\x60\x03\x67\x00\x19\xd0\x00\x77\x90\x06\x4c\x07\x6a\xc0\x64\xb0\x06\x98\x01\x1b\x14\x5d\xcd\x28\xb4\x81\x3f\xd8\x2b\xc8\x63\x1c\xdc\xc0\x5f\x43\x4a\xd0\xa3\x1f\xde\xa0\xde\x98\xa4\xce\xb9\x07\xe0\x40\xcb\xd4\x01\x1f\x0d\x88\x83\x70\x05\xe9\x81\x3e\x4a\x90\x03\xdf\x9a\x93\x82\x1f\x60\x02\x7f\x90\x2f\x8d\x43\x00\x08\xfe\x84\xdf\x02\x46\x34\x10\x08\x61\x66\x69\xa0\x91\x71\x10\x04\xfd\x9e\x06\x8f\xa1\x85\x41\xf0\xcd\xd1\x80\x47\x86\x80\x10\xca\x80\x68\x56\xb8\x0d\x85\x50\x67\x6f\x73\xe2\x36\xc1\x10\xaa\x06\xbb\xbb\x34\x12\x91\x1a\xe8\x68\x28\x6d\x61\x12\x98\x0c\x28\x01\x17\x50\x09\xb8\x00\x4b\xc0\x01\x5c\x02\x34\xc0\x04\xec\x8c\x07\xb6\xa0\x09\x58\xc2\x26\xe0\xd4\xf6\x61\x01\x9d\xc0\x57\xb1\x7c\x58\xf2\x27\x60\xc6\xa0\xa0\xc7\xc4\x56\xc5\xab\xee\xe7\x50\xf0\x6b\x5f\xc7\xdc\xd1\xcb\xa2\xa0\x65\x2b\x76\x65\x94\x47\x41\xcb\x95\xfc\xca\x74\x4c\x0a\x4c\xc3\xa5\x80\x21\x9b\x42\x3b\x28\xba\xe7\x53\xc0\x88\x51\xa1\xd9\x97\x24\x8f\xd1
\xcb\xa9\xfc\x8b\xd8\x7f\xc8\xfc\x07\x8c\x05\xab\x9a\x91\xb8\x1f\xc8\x03\x56\x29\xc6\xa5\x97\x5b\x71\x62\xb3\xea\xb2\x2b\xd4\x53\x57\x65\xad\xd1\xf0\x2b\x94\x91\xd0\x6b\x5c\xa9\x19\x4b\xdc\x1e\x94\x6c\x1d\x1d\x7a\x23\x4b\x51\x2c\x22\x53\xb0\x2f\x30\x19\xff\x02\x6e\x19\x18\x20\x6b\xa8\x49\x2c\x0c\x58\xf0\x30\x40\x67\x62\xc0\x9a\x8b\x01\x5b\x36\x06\xc8\x7c\x0c\x90\x19\x19\xa0\x73\x32\x60\xc1\xca\x00\x99\x97\x01\x1b\x66\x06\x6c\xb9\x99\xae\x00\x9a\x4a\xde\x8e\x9f\x01\x07\x0c\x4d\x9f\x0c\xbc\x16\xdc\x8e\xa5\xe9\x91\x41\xd0\xc6\xdb\x33\x35\x9a\xba\x60\x92\x7c\x01\x3a\xd1\x17\x38\x8b\x40\x80\x4a\xc5\x04\xdf\x56\xd2\x2f\x98\x2a\xf1\x17\xd8\x25\xff\x8a\x0e\x3b\x71\x98\x7e\x4e\x76\xe6\xb2\x78\x25\xb6\xf8\x04\x97\x78\x6c\x5d\x3a\xc6\x9b\xb2\xfd\x3c\x35\xf3\x6f\x49\xcd\x94\x03\xc4\x33\x33\x9e\x99\x19\x7d\x3c\x33\xe3\x99\x19\xcf\xcc\x78\x66\xc6\x33\x33\x9e\x99\xc1\x9d\x29\x3c\x31\xe3\x89\x19\x4f\xcc\x78\x62\xc6\x13\x33\x9e\x98\xf1\xc4\x8c\x27\x66\x3c\x31\xe3\x89\x19\x4f\xcc\x98\x16\xf5\xc4\x8c\x27\x66\x3c\x31\xa3\x7f\x3c\x31\x33\xf0\x78\x62\xc6\x13\x33\x9e\x98\x29\x2a\xed\x89\x19\x4f\xcc\xd4\x1f\x4f\xcc\x78\x62\x06\x2f\xc4\x13\x33\x9e\x98\x31\xb6\x7b\x78\x5e\xc6\xf3\x32\x9e\x97\xf1\xbc\x8c\xe7\x65\x3c\x2f\x03\x9e\x97\xf1\xbc\xcc\x68\x51\xcf\xcb\x10\x4a\x7a\x5e\xc6\xa8\xb0\xe7\x65\x3c\x2f\x33\x5e\x17\xcf\xcb\xfc\xe7\xf2\x32\xfb\x60\xf5\xb9\x7d\xb9\x7d\x3e\x74\xe6\x4a\xf3\x76\xac\xa4\x09\x2e\xf6\x16\xd5\x6a\x0e\x39\x16\x7d\x16\x03\xec\x8e\x49\x67\xc3\x4e\xfe\x20\xd3\x13\x0a\x05\x1c\x58\x07\x09\x5f\xd1\xa8\x17\xdb\x79\x7a\x5a\xbc\x9a\x22\xe2\x99\xba\xb3\xac\xa3\x4d\x42\xdd\x17\xef\x95\x5a\xe5\x90\xf2\x1c\x80\x51\xfd\x8d\xb4\x5a\xa5\x3c\xe4\xab\xac\x93\x2d\x1f\xf6\x2c\x15\x7f\x4a\xe2\xc3\x66\x8b\xbe\x80\xe5\x23\xac\xe3\x29\x04\xa7\x67\xd7\x73\xb5\x23\x7f\xb8\xbc\xb9\x9a\xbf\x3d\x7b\x77\x36\x3f\xc5\xad\x37\xcb\xc5\xd5\x11\xbc\x59\x2c\x97\x8b\x0b\x8c\x13\x08\x3e\x31\xa3\xb6\xae\x28\x09\xcb\xc5\x15\xea\xf7\xea\xab\x8c\x8b\x44\x87\x5d\x35\xb2\xf0\x33\x2d\x88\x32\xbe\x41\x6d\x69\xe2\x86
\xca\x32\x59\xf2\xcf\x7f\xa2\x4f\xd1\xcb\x7a\xbd\xa9\xc3\x5f\x8c\xfe\x1d\x8b\x9e\x9a\x23\x57\xaa\x54\xf0\x29\xa5\xdb\x83\x1d\xb9\x44\x42\x31\xf9\x2e\x78\xb6\xc5\x31\x52\x6e\x16\xbd\xeb\xfa\xeb\xbf\xe1\x85\xaf\x51\x4f\xab\xc5\xef\x36\x6f\xf1\x4f\x3b\x29\xeb\xb6\x66\x6f\xc5\x76\xbe\xd4\x44\x36\x55\xc9\x4a\x89\xc6\xa3\x2c\x7c\x52\x1e\x11\x31\xda\xb8\x9a\x6d\x0b\x23\xfa\xe3\x36\x58\x6d\x4b\x8c\xa9\xe6\xfa\xbf\x67\x09\x5e\x66\x6b\xb8\xe7\x18\x10\xd5\x98\xd6\x59\xa0\x2f\xe6\xcb\xf7\x8b\xd3\xc6\xea\x5c\xfc\x9d\xf4\xd0\x44\x49\x2f\x0a\xce\x7e\xa9\x84\x9c\x5d\x96\x7f\x96\xde\x99\xf9\x9f\xcf\x67\xcb\xf9\xcd\x72\xda\x05\xbd\xfb\x6d\x94\xe2\xe8\xe4\xb0\x55\x2b\x90\x8a\x21\xf3\xde\x56\x6d\x4b\x29\xa6\xba\xc1\xa8\xa4\x22\xda\x58\xf2\x34\xc3\x43\xce\xf6\x87\xf7\x1b\xcd\xdb\xbf\x81\xc3\xbb\x45\xb5\x9a\x96\xb7\xfd\x3e\x7c\x02\x96\x37\x72\xdd\x71\x08\xd8\x3d\xe6\x1e\x5a\x52\xf4\x62\x81\x14\x6b\xce\x21\x33\x57\x4e\x92\x90\x77\x3a\x2f\xec\x66\x2f\xa4\x73\xc2\x06\x8c\x70\xc5\xfc\xe2\xee\xc8\x03\x7c\x70\x83\xf7\xc5\xee\x5d\x5d\x36\xf8\x39\x58\x5f\x13\xce\x57\x6e\x78\x28\xa9\x83\x8c\x6f\xc1\x6a\xa2\x24\x8e\xf1\xbd\x6a\xd7\x43\x89\x1c\x62\x7b\xdb\xac\x2e\x6e\x88\x74\xb8\x5e\x57\x58\x6a\xd3\xa5\xa5\x87\xd1\x45\x49\xac\x50\x90\x01\x3e\x17\x77\x0b\xad\xd0\x9a\x01\x36\x17\x37\x48\xbb\x34\x90\xa5\x73\x8f\x1d\x93\xeb\x66\x75\xb3\x62\x71\x9f\x7b\xe7\xb3\xe6\x70\x8d\x18\x5c\xac\xc6\x63\x98\xbf\xcd\xbd\xd5\x70\xa3\x77\x88\xbd\x2d\x59\x5a\x94\xc8\x31\xee\x96\x02\xde\x8d\x31\xb7\x85\x21\x1b\x25\xd4\x3d\x6f\xeb\x9e\xb5\x75\xef\x69\x61\xc0\xd8\xd2\x3d\x2d\x06\xbd\x2c\x4a\x5e\x16\xbd\x13\xd6\xd9\xda\x3e\x56\x16\x25\xb4\xe3\xa7\xa1\xe5\x64\x91\x27\x8b\xee\xf8\xd1\x32\xb2\x48\x75\x64\xd2\xf4\xd4\x70\xc4\xc7\xba\x67\x63\x4d\xb8\x58\x8b\xbd\x75\xd8\x71\x03\xbb\x8a\xf6\xf3\xb0\x4d\xbe\x15\x25\x57\xcb\xc2\xf6\xb1\xad\xb8\x69\x35\xe2\xb1\x51\x2a\x43\x08\xeb\xdf\xeb\x09\x98\xd6\x09\x78\xd6\xc9\x59\xd6\xa9\x38\xd6\xa9\x18\xd6\x49\xf9\x55\x17\xec\x2a\x5e\xcb\x45\x63\x56\xa9\xbc\x2a\x91\x55\x25\x72\xaa\x44\x46\x95
\xce\xa7\x92\xd9\x54\x3b\x2e\xd5\x92\x49\xb5\xe6\x51\xad\x59\x54\x6b\x0e\xd5\x9a\x41\xb5\xe6\x4f\xdd\xb0\xa7\x0e\xb8\x53\x32\x73\x4a\xe3\x4d\x1d\xb1\xa6\x13\x71\xa6\xd3\x30\xa6\x06\x7c\x29\xfa\x28\x3b\xc6\x96\x16\xac\x28\x4a\xe8\x28\x57\x5a\xe3\x44\x51\x82\x7b\x99\xd2\xba\x53\x00\x4a\x62\x0f\x4f\xaa\xe5\x43\x91\xa7\xf9\x8a\x25\xed\x67\x43\x91\xfa\x59\xc5\x91\x4e\xcc\x85\x8e\x32\xa1\x24\x1b\xde\x10\x0f\xda\xe1\x3b\x91\xb7\x91\x36\x0b\xaa\x67\x3b\x71\x57\xd1\x31\x0e\x54\x7c\x0a\xed\x82\xa3\x65\x40\x6d\xe8\xc5\x01\xfe\xb3\xc3\x73\xe2\x04\xb7\xd8\x4f\x2d\xcb\x49\xef\xab\xa3\x5e\x8e\x13\x57\xcb\x8a\x84\x75\xcb\x70\x4e\xc3\x6f\x5a\xb3\x9b\xd6\xdc\xa6\x2d\xb3\x49\xe0\x35\xc9\xb0\xa3\x1d\xa7\x69\xc3\x68\xba\xd1\x84\x5b\xb0\x99\xcf\xad\x07\xb7\xe4\x32\x0d\x98\x4c\x74\x04\x8c\x41\x1e\xb3\xae\xd0\xc6\xad\xc4\xc3\x2c\x66\xce\x56\xe2\x76\xfb\x11\x0e\x53\x71\x95\x68\x05\xf3\x24\x0c\xe6\x04\xfc\xa5\x09\x7b\x29\x4f\x12\x38\xcd\xad\x73\xee\x72\x9c\xb9\xac\xfa\x0c\xa7\xb5\xeb\xe3\x2d\xeb\x5a\x7d\xc2\x20\xd5\x58\x00\x9a\x5a\x7d\x0b\x55\x78\x9b\xb3\xcc\x9b\x04\xf7\xdd\xc3\x8c\x65\x8d\x99\x44\xbb\x90\xe9\xf8\x4a\x6b\x6b\xc5\xb0\xce\x9e\x66\x63\x6f\x71\x95\x7a\x35\x3b\x65\x81\x72\x1e\x7e\x72\x8a\xd0\x93\x1d\x48\xd1\x21\x1b\x39\x0d\x17\xe9\x90\x89\xc4\xeb\x3d\x49\x2c\x24\x95\x83\x24\x32\x90\x76\xfc\xa3\x15\xfb\x48\xe3\x1e\x69\xcc\x23\x91\x77\xa4\xb2\x8e\x34\xce\x91\xcc\x38\x5a\xf1\x8d\x76\x6c\xa3\x2d\xd7\x68\xcb\x34\xda\xf2\x8c\xb6\x2c\xa3\x2d\xc7\xe8\x82\x61\x24\xf2\x8b\x59\xf3\xee\x70\xce\xa2\xcd\x81\x6d\xb8\xc9\x1a\x87\xbc\x2f\xb5\xee\x49\x4b\xfd\x8b\x0d\x04\x35\xef\x1d\x0a\xec\x2b\x5d\x67\xee\x79\xd6\x74\x9b\x1f\x5f\xdd\x0f\x51\x90\x2d\x1e\x78\x92\x04\xeb\x67\xf8\xee\x0f\xb5\xb7\x61\x3f\x56\x1c\x0d\x45\x6d\xc5\xe1\x46\x9e\xb5\x73\xa5\x95\x3a\x8c\xc8\x4f\x37\x54\xf8\x34\x8e\x45\xd2\x5b\x2e\x52\xba\x85\x3c\xd7\x86\x7c\x8b\x64\x03\x62\xf3\xba\x8a\xfd\xfb\x49\x15\x95\xce\xa3\x6c\x25\x69\x82\x48\xc2\x7d\xaa\x72\xb9\x97\xa3\xf8\x10\x45\xed\x18\x89\x0d\xd2\x4a\x63\xcb\xd4\x9f\x7f\xbb\x15\xef
\xb9\x35\x07\x54\x13\x7e\xff\xe9\xe1\x87\x97\x09\x4f\xb3\x97\x0f\x3f\xbc\x2c\xd0\xd0\x13\x75\xe8\x3c\xcd\x9b\x39\x36\xe5\x57\x73\x7d\x4e\x04\xb7\x17\x2d\x01\xbd\x8e\x05\xd9\x36\xe1\xe9\x36\x1e\xb4\x96\x98\xd8\x46\xda\x13\xa9\x14\xdb\x5b\xa2\x35\x88\xf2\xdf\x43\x18\x44\xe2\x1c\x9c\xb0\xc7\x08\xb6\x71\x12\xfc\xaf\x18\x4d\xe2\x6e\x98\x6b\x67\x87\xef\x06\xab\x2d\x4b\x06\xe2\x18\x21\x95\x56\x38\x15\xd5\xa8\x42\xca\xd0\x93\x1f\xab\x71\x31\x68\x6b\x73\xf7\xf4\x55\x1c\x8e\xa7\xe4\x43\x2c\x36\x6d\x97\x4c\x21\xde\xb4\x08\xfa\xfb\xa5\x74\x03\x85\x51\xd7\x55\x32\xcd\x58\xc6\xd5\xc7\x4b\x2d\x81\x5c\x6f\xca\xb9\x71\x62\x54\xef\x32\x6c\xa1\xc4\xea\xd4\xf2\xc7\x20\xaf\xa4\x86\xef\x7c\xbb\x38\x5f\x5c\xe3\xd8\xce\x9f\xae\xe7\xbf\x1e\xc1\x9b\xf3\x0f\xf3\x23\xf1\xe7\xf9\xe5\x11\xfc\x3a\x3f\x3f\x5f\x7c\x3c\x82\xc5\xb5\xd8\x5a\xa5\x3f\xc6\x98\xcf\x84\xc9\x7d\xe1\xb8\x5b\xbd\xd1\x12\xa2\x76\xa3\x3f\x12\x95\x37\x91\x34\x1f\x3b\x67\x1c\xe7\x9f\x3e\xfa\x33\xd5\x32\xa3\x3f\xbb\x1e\xf9\x42\x63\x4e\x9b\x3e\x3b\x4c\x79\x6c\xfa\x0c\xc1\xd0\xd4\xdd\x59\x52\x36\x41\x3e\x47\x38\xac\x0e\x89\x8c\x84\x5a\x4d\x95\xd1\x31\x5c\x56\x01\x37\x5d\xc6\xe5\x6a\x71\x69\x98\xbd\x59\xfc\x6d\x7e\x04\x6f\xe6\xe7\x8b\x8f\x6e\x26\x06\x85\x75\x3e\x56\xf5\x18\x9f\x1d\xf3\xb1\x11\x1d\xb2\x3b\x1e\x4e\x37\x04\xcf\x85\x78\xcc\xc0\x98\xa9\x1a\x95\x23\xc2\x70\x24\xc8\x5e\x35\xfb\x8c\xe8\x60\x10\xb6\xb8\xc0\xad\xd7\xf1\x61\xdc\x2b\xb1\xf5\xcd\x7f\x33\x20\x5b\x3a\x47\xde\x32\xeb\x53\xf3\xa3\x6b\xff\x96\x6e\xe3\x83\x41\x6e\xb8\x3b\x5e\x58\xf0\x0a\xc7\xf0\x88\x65\xc1\x03\x87\x74\xc5\xc2\xf2\x0d\xea\x44\xd8\x7b\x8a\x0b\x76\x3c\xdd\x06\xf7\xd9\xe9\x21\x19\x81\x0b\x8d\x86\x86\xe6\x5a\xd4\x10\x6f\x7a\xa8\xe3\xb0\xce\x4b\xd4\x32\x34\xa6\xfb\x90\x3d\x01\xcb\xa9\xde\x20\x8d\xa3\xb1\x63\x1b\xc0\xac\xf3\x6b\x48\x83\xdd\x21\xcc\x58\xc4\xe3\x43\x1a\x3e\x89\xd6\x7e\x4c\x0b\x0f\xfb\xfb\x24\xde\x41\xf6\x38\xc4\x3a\xa4\xc1\x2e\x08\x59\x72\x1c\xf2\x68\x93\xe5\x97\x33\xa5\xda\x4b\xe1\xf7\xfc\x64\x73\x72\x04\x8f\x9c\x7f\x3e\x16\x57\x8d\x63\xf1\xa7
\xbc\x0b\xd2\x3f\x0c\xd5\xb3\xf1\xc9\x85\x86\x70\x1f\xa7\x81\xe8\x51\xe5\xfa\x1b\x28\x7d\x74\x1c\x85\x86\x11\x11\xb3\x58\x7d\x71\xee\x81\x2e\xef\x58\x32\x70\x7a\x7c\x0f\xe7\x67\x97\x73\xd8\x87\x71\x26\xfb\xb5\xaf\x66\x5f\x66\x5f\x82\xd1\xf3\xfd\xc8\xb9\xb4\x35\x24\x7e\x11\x22\xc7\x7f\x6c\xb8\x41\x0d\x4b\xeb\x0c\xaa\xea\x30\xdb\xe6\xd5\x7e\x01\xf6\x25\x18\xb8\x5e\x9b\x1d\x83\x0d\xd6\x58\xe3\x15\x16\xbd\xbe\x76\xbe\x56\xad\xaf\xf9\x3a\x30\xfc\x79\xa0\x16\x8d\x69\xaa\x7e\x23\x44\x9b\x15\xc0\x74\xbb\x14\x3b\x7a\x1c\xe9\x1e\x46\x44\x4b\xa8\xcf\x3d\x69\x24\x38\x66\xf2\x0a\xc9\x12\xf5\x6f\x23\xcb\x6f\x8e\x92\x6a\x4e\xe7\x37\x6f\x67\xe7\xf3\xe6\x51\x42\x4c\xb6\xd9\xf5\xd8\x29\xfd\x7c\xf1\xd3\xf7\xaf\x86\x4f\x1a\xe3\xe7\x8c\xe3\xee\xfb\x47\x7e\xaf\x2a\x37\xf6\x23\x51\xb7\x9e\xdf\x3c\xb9\x5f\x24\x7e\x75\xba\x48\x0c\x4b\x43\x2c\x12\xbf\xfa\x45\xe2\x5f\x68\x91\xf8\xd5\x2f\x12\x3d\xcf\xb3\x2c\x12\xbb\x38\x65\xc1\x4a\x75\x4c\xbd\x3a\x3d\xeb\x41\x6b\x38\x5c\xd4\x4a\xeb\x7e\xd5\x1a\x03\x3d\x3f\xef\x8c\xf6\x55\x1c\x49\x98\x3e\x48\xa5\x5a\x32\xda\xf0\x35\xb0\x14\x18\x6c\x92\x40\x39\x55\x04\xa1\x04\x4b\xa5\xa3\x06\x5b\x6d\x8b\x12\x8d\x4f\x7f\x94\xe3\x0c\xe2\xd5\xea\xb0\x97\xfe\xc1\x71\xc4\x21\x4e\x60\x27\x91\x7a\x21\xe1\xa4\x53\xe7\x55\x1c\xdd\x87\xc1\x2a\x6b\x2c\x0d\xc7\xf2\xbd\x9d\x6a\x1f\x43\x12\x3f\x6a\xfe\x76\x15\x87\x87\x5d\xd4\xf9\x87\xbe\x65\x47\xfd\xbc\xb3\x16\x0d\x05\xa6\x1a\x0a\x3e\xd5\xd5\xc8\x09\xe9\xad\x1f\x75\x5a\x5c\x5d\xc2\x44\xd3\xe6\xd5\x29\x2e\x2b\x6a\x7c\xc8\x06\x68\x2e\x39\xb2\x01\xf5\x95\xd6\xa9\x92\x3b\x57\x8e\xb0\x83\x03\x76\x2a\x25\x5f\x51\xbb\x54\xb4\xd7\x3c\x23\x75\xaf\x89\x92\xb7\x47\xb5\x3b\xb8\x27\x9a\x8c\x71\xdd\x57\x8e\xed\x3f\x5b\x1e\x6c\xb6\x99\x7e\x65\x18\x0f\x56\x36\x1e\x96\xac\xd5\x11\xef\xe5\xeb\xb4\xbf\xec\xf4\x87\xaa\x5a\xe5\xe5\x12\xf2\x23\xd8\x71\x96\x1e\x12\x75\xb9\x95\xb3\x33\xfd\xfb\x81\x25\x7a\x13\x98\x9a\x90\x43\x9f\x36\x70\xfe\x68\xd5\x5b\x6d\x22\xc3\xbf\x1c\xeb\x99\x01\x19\x9d\x6f\x0f\x22\xd5\xb4\x41\x1c\xb1\xb0\x58\x5a\x1a\xa6\xb0\xa2
\x4d\xf4\x3b\xf3\xf8\xa1\xe3\x2e\x64\xd1\x67\xb7\x07\xb4\x37\x42\x24\xfa\x80\xd6\xd3\x4e\xc3\xc2\x5a\x4a\x23\xf9\x31\x90\xee\xd9\xaa\xf7\xd6\x3a\xb8\xe4\x36\x9f\x63\xf8\xa2\x0e\x0b\x03\xbf\x48\x57\x71\xc2\x57\x2c\xe9\xbf\x6a\x1f\x43\xc6\xbf\x0c\x89\x08\xe3\x4d\x7a\xc5\xa2\xde\xb3\x59\xf9\xef\x6e\x7b\xe9\x7c\xe4\xb5\xe8\x9e\x32\x15\xf8\xed\xb4\xfc\xdd\xe0\xd8\x32\x3b\xb0\x9b\x44\x43\xa6\x1e\x7b\x0d\x42\xcf\xb6\x26\x80\xaa\x8d\xb2\x49\xaf\xb6\x71\x9c\xf2\x34\x8f\x7e\x17\xc6\x1b\xe0\x51\x96\x47\xad\x1b\x39\x48\x2a\x7f\xb1\x13\xb8\xe1\x1c\x7e\x9b\xad\x1f\x58\xb4\xe2\x6b\x39\x62\xe0\xff\x1e\x54\xb8\xa2\x7e\x6b\x74\x18\x6f\x36\x41\xb4\x79\xb9\x8e\x57\xe9\xcb\x87\x80\x3f\xbe\x64\xb9\x84\xe3\xbf\xab\xc2\x83\xca\x2e\xf1\x2c\xa2\xf0\xa9\x59\x63\xf1\x41\x3b\x26\x1d\x1e\xca\xe0\x90\x92\x2b\x31\x75\x6d\x9b\x45\x20\x77\xeb\xa2\xac\x14\x26\xc3\xe5\x84\xf5\x37\x0d\x89\x29\x00\x8f\x4b\xb6\x1b\x33\x1d\x98\x82\x7f\xed\x28\x92\xf5\x37\xe0\xae\x6a\x42\x80\x54\xde\xe5\xcd\x5f\x05\x93\x56\x61\x56\xc2\x70\x3c\xd2\x9b\x58\x69\xc4\x5e\x7e\x02\x6f\x95\x05\x46\x42\x58\xe1\x53\x15\x5d\x5a\xb4\x78\x7a\xd8\xef\xe3\x24\x53\xc1\x87\x46\x24\x96\x8e\x16\xbc\xd8\xbc\xa4\xa7\x45\x7e\x63\x2a\xae\xce\xdb\x38\xcd\x8a\x77\x0c\xf7\x23\x81\x0f\xc1\x13\x21\x46\x0c\x08\xc5\x18\x82\x2e\x93\x70\xc9\x8c\xad\xc6\x2d\x55\xc7\x65\x7f\xbf\x86\xb7\x62\x46\x16\xff\xbb\x63\x11\xdb\xf0\xe4\xe5\x95\x6a\xde\x51\xc3\x81\xf4\xf6\x78\x2d\x87\x53\xcf\x6f\xcb\x85\xd7\xed\x7e\x74\x33\xba\x9e\x23\xf7\xa3\x71\x81\xad\xc5\xb3\xfc\x30\x48\x0f\xbb\x1d\x4b\x82\xff\x95\x78\x46\xcb\x91\xfa\x99\x0e\x16\x56\x5b\xd7\xf8\xa1\x02\xca\x30\x64\x43\xd5\x6c\xb9\xcc\x59\x6e\x95\x1b\x76\xd8\xf0\xbf\x05\xfc\xd1\x64\xed\x34\x08\x4d\xd9\xe6\xa4\x0b\xf1\x24\x45\xd1\xd8\x20\x32\x93\xde\x18\x51\x1f\xc5\x52\xa7\x92\x8e\x49\x9f\xae\x72\x78\x65\xb1\x34\x2c\x01\x53\x4d\x32\x32\x27\x47\xcd\x58\x98\x30\x91\x61\xfc\xc8\x93\x37\xf1\x21\x32\x88\x0b\x89\x30\x94\x22\x4d\xa5\x9a\xc3\x68\x51\xad\xd1\x92\x5d\x25\xa5\x28\x0b\x77\xa2\x70\xe5\x65\x23\x1b\xd6\xa4\xe9\xd4
\xd3\xb1\xbd\xe6\xd6\x40\x69\x6c\x05\x16\x3e\xb2\xa7\x14\xee\x38\x6c\x12\xce\xcc\x82\x7f\xca\xe0\x4b\x71\x52\x91\x02\xa2\x5a\x63\x55\x39\xec\xf7\xdf\x62\xef\x7c\x28\xab\x85\xef\x1d\xf9\x49\xcf\xd4\x3b\x21\x97\xae\x7c\xcc\x24\x32\x17\xa6\x6b\xd2\x3d\x4b\x3e\xcb\x05\x7b\xba\xd5\xeb\xa6\xf1\x8e\x49\x96\x30\xc4\x2b\xb0\xeb\x98\x6c\x21\xfb\x75\x6c\x7c\x4f\x02\x75\xf7\x2b\xbf\x64\xf9\xb4\x1f\x1e\xce\xe6\x4b\xe3\x2e\x88\x66\xd8\xd0\xb9\x28\x07\xe9\xb6\x16\xbb\xf3\x3e\xbb\xb5\x2f\x8e\x1a\x54\xa2\x68\x4b\x1e\xad\x4c\x42\x03\xe4\x6a\x24\x35\xab\x82\xdd\x3e\xe4\xa2\x4e\x7c\x0d\x77\x4f\x39\x88\x54\x06\xba\xd8\x05\x51\xb0\x33\x62\x67\xab\x84\x8d\xca\x01\xa2\xc8\x94\x21\x1d\xb2\xea\x87\x2a\xe3\x74\x35\xef\xc4\x9c\xfd\xc2\x44\xfd\x8e\x20\xb8\xaf\xc5\xdc\x4d\x61\x7f\xb8\x0b\x83\x74\xcb\x45\x33\xac\x38\xf0\x87\xa1\xe3\x4a\xf5\x7c\xff\x4a\x7c\xd1\x21\xe3\x29\x04\x19\x3c\xca\xf5\x24\x8a\xc5\x55\xf3\xb3\xa8\x5f\x94\xf2\xca\xd3\x9e\x19\x2c\x7f\xf9\x67\xab\x6a\xb1\x4c\xea\xfe\xd5\x0b\xca\xd0\xc3\x69\x1e\x9d\xb7\xc0\xe4\x4d\xd6\xaa\x7d\xae\x00\x64\xd1\x1a\xf8\x97\x20\xcd\x52\x75\x2f\x93\xd6\x89\x6d\x30\x9e\x64\xb5\x39\x61\xa6\x1d\xd7\x37\xe6\x93\x13\x9c\xad\x66\xcd\x97\x1a\xe1\xdd\x4d\xbb\x5e\x33\x3f\x94\xa8\x8b\xd8\x6c\x64\xbb\xa9\x79\x61\x14\xe3\x54\x2e\x87\x41\x1e\xe7\x76\x55\x54\x4e\x67\xe3\xbb\x9a\x5d\xff\xfc\xe9\xed\xfb\xd9\xf5\xf2\xd3\xf2\xd7\xab\x39\x36\x1b\x8b\x2a\x7f\x7e\x76\x39\x3f\xca\xff\xfc\x66\x76\x3d\x1e\xd6\xcc\x0c\xea\x3b\x1e\xac\x9e\x71\x71\x51\x3b\xe3\x1f\xbf\x19\xb4\x20\x9a\x00\x05\x40\xd7\xbb\x18\x39\xbb\xeb\x2c\xc1\x55\xbd\x2a\x57\xb4\x32\x66\x51\x56\xb8\x80\x8f\x34\x42\x7e\xa8\xa9\xf6\xd5\x4d\xf0\xd0\x8d\x1d\xf2\xe2\x45\xe9\x17\x2b\x07\xd1\xd8\x94\x7f\xa7\xc2\x4b\xb3\x55\x26\x4e\x39\xea\xf8\xf4\xe5\xa8\xf5\xa6\x20\x0f\x37\xb0\x66\xd1\x78\x22\x1a\xe5\xce\x1e\xdc\xc3\x17\x51\xae\x3c\x6c\x35\x8e\x52\x85\xa8\xaa\x65\xc6\xf4\x28\x5b\x96\x41\x96\x04\x9b\x0d\x4f\xc4\x21\x2e\x8c\x1f\x65\x04\xea\xfc\xa4\xdd\xff\x86\x51\xb9\x05\xfb\xd1\x7c\x03\xbb\x8b\x1f\xf8\x09\xdc\x28\x47
\xbd\xf0\xe9\xa8\xfc\x20\xf9\x2f\x2f\x65\x0d\x46\x64\x33\x78\x64\x49\xa4\xb6\xc4\x81\xb7\xbc\xcc\xbf\x26\xdb\xf2\xb1\x13\xa9\xbe\x57\x8a\xb7\xa8\x86\x3f\x86\x43\x24\x1b\xfd\x8b\x8c\xc3\x39\x22\x71\x7f\xc8\xe4\x9e\x56\xeb\x5e\x25\xe7\x04\x7e\x7f\x9a\x77\x51\x72\xd8\xed\xd3\xe2\x2d\x27\x7f\x00\x98\x8d\xed\x46\x2c\xaa\x76\x60\x19\x7e\x7e\xcd\x13\x71\xfc\x2b\xab\x2e\x6d\xe3\x52\x45\x1b\x87\x61\xfc\x38\xbe\x7d\xdc\xc7\x87\xa4\x3e\x8d\xfe\x51\xba\xf1\xc2\x5f\x5f\x1d\xc9\xc0\xb9\x19\xdf\xc4\xc9\xd3\x6b\x78\xf1\xe2\x74\x76\xf9\xd3\xfc\xfa\xc5\x0b\xf1\xf7\x79\x5b\x8f\xad\x66\x2f\x5e\x48\x1f\x69\x51\xe6\x9f\x47\xaf\xeb\xe2\xff\xd2\x11\xff\x71\x76\x7d\x79\x76\xf9\x13\x5d\x7e\x5d\xfc\xf7\x6e\x6a\xaf\x5c\xcc\x3b\xd2\xff\xe8\xa8\xf2\x95\x78\x79\xcd\x8a\x5e\x17\xce\xae\xfa\xe9\xdd\xeb\xe7\x55\x3c\xea\x08\x25\x43\x13\x35\x87\xb4\x18\x86\xaa\x01\xd4\x30\x3c\x2a\x5e\x64\x76\xa5\x96\x35\xf9\xfe\x15\xdc\x1d\xb2\x9e\xaa\xfd\xf1\x15\x30\xc8\x1b\x21\x7f\xc5\x88\xcc\xbc\x02\xe2\x94\x23\xe3\x8d\xdd\xf1\xec\x91\xf3\x48\x4a\x8a\xd6\xf0\x17\xf1\x1f\x58\xfc\xdc\xac\xef\x88\xcc\xde\x65\xeb\x2f\xed\xba\xff\xb5\x56\x5f\x93\x85\x57\xf9\xf9\x36\x1b\xad\xfb\x1a\x29\x55\xb5\xb3\x89\xd0\x93\xe1\x23\xc3\xb7\xa6\x6e\x37\xca\x57\x43\x3d\x50\x1a\x1e\x02\x70\x49\x59\x8c\xc8\x37\xb0\x46\x6d\xcd\x48\x32\x17\x6d\x63\x4a\xc3\x01\x85\x88\x33\x90\x09\xea\x5b\x1d\x63\x3e\x52\x6e\x87\x9c\x1b\x20\xe3\x8c\x24\x22\xe8\x39\x40\x44\xdc\xc0\x53\x74\x60\x4a\xd2\x81\x29\x4d\x07\xc6\x44\x1d\x98\x53\x75\x60\x4e\xd6\x81\x11\x5d\x07\xd8\x4c\xa8\x76\xb3\x10\x93\xf9\xd4\x7e\x26\x62\x73\x98\x4e\x44\xde\x01\x9e\xbe\x33\x92\x39\x9c\xb0\x14\x45\xe0\x01\x6a\x72\x51\xb3\x8e\x9a\xd1\x78\x60\x48\xe4\x81\x29\x95\x07\xd6\xc3\xd6\x84\xce\x03\x47\x84\x1e\x98\x52\x7a\x80\x35\x71\xa0\x8d\x1c\x24\x62\x0f\x48\xd4\x9e\xd1\x80\xcf\xcd\x19\x2e\xc8\x3d\xe8\x86\x3b\x99\xc6\x6a\xb1\x34\x34\x10\x83\xcd\x8a\x87\x78\x47\x4f\xca\xf7\x3c\xac\xad\x18\xab\x52\xdb\xdc\xb2\xeb\x8f\x74\x8f\x54\xe6\x2a\xf8\x6f\xcb\xe1\x26\x63\xab\xcf\xeb\x24\x78\xe0\x49\x01\xef
\xc1\xec\xea\xcc\x95\x81\x36\x43\x67\x63\x47\xa5\x72\xec\xed\x3f\xc3\x34\xe7\x8e\x3a\x11\xfd\xde\x46\xc7\xaa\x32\xb0\x67\x09\xdb\xf1\x8c\x27\x29\x31\x1c\x8e\x99\xb9\x09\xe4\x8a\x7d\x6f\x56\x4f\x5c\xc2\x46\x86\x4d\xdc\x89\x4e\xdb\xd9\x4e\xcd\x88\x4c\x8a\x39\x51\x77\x63\xab\xd1\xe8\xfd\x3a\x71\xa3\x92\x6b\x3e\x76\x9c\x74\x8c\xa4\x42\x33\xf9\xcc\x07\x69\xdf\x2c\x2d\x33\xb5\xe4\x64\xbb\x43\x98\x05\x7b\xe3\xb4\x34\x8d\x4c\xff\x71\x52\x85\xfe\x86\x87\x80\x3f\xa6\xc5\x12\x3e\xec\x4c\x54\x7f\xf0\x59\x40\xc9\x39\x40\x49\x91\x81\x1d\xe5\xff\x9c\x2a\xfb\xe7\x14\xb9\x3f\xbf\x52\xe6\xcf\x29\xf2\x7e\x1a\x65\xfd\x84\x95\x91\x57\x45\x4d\xe6\x48\xce\x4f\x6c\xbe\x2b\xf7\x19\x3f\xa7\xca\xf7\x69\x94\xed\x93\x14\x92\x73\x20\xd7\x67\x33\x7f\x27\xaa\xb6\xfa\x4c\x9f\x9a\x80\xaa\xe4\x36\x40\x06\x82\xb7\xc9\xf2\xe9\x62\xfd\xb2\xc8\xf0\x39\xfd\x7e\x69\x99\xd1\xd3\x34\x9f\x27\xdc\x61\xc2\xfa\x8e\x67\xf3\xcc\xe3\x66\x63\x86\xd0\x70\x2e\xcf\x2a\x3f\x27\x42\xe6\x60\x26\xcf\x66\x76\x4e\x84\xd4\xfe\x3c\x9e\xcd\xdc\x9c\xa8\x75\x5e\x9b\xc5\xb3\x27\x33\x27\x42\x70\x37\x21\x73\x7f\x5e\x4e\x8c\x58\x5d\xfc\xee\x6e\x4c\x6e\xe4\xba\xa4\x89\xe0\xad\x8f\xca\x8d\x90\x5b\x66\xef\x1c\xc9\xc8\x89\x10\x59\x9d\xef\x06\xf3\x71\xa2\x3e\x9e\xf5\x67\x73\x5d\x92\x32\x5b\x96\x79\x3b\x27\xca\xc5\x69\x94\x89\xb3\x16\xf6\x1b\xb7\xed\x19\xe4\x9b\xc2\x6d\xd1\x43\x59\x38\x9b\x99\x35\x31\x27\x8a\xa1\x1c\x9c\xfa\xbc\x9a\x08\xe9\x3d\x19\x38\x75\x31\xbf\x31\xfd\xa6\xcb\xbf\xa9\xcd\xa9\x89\x99\x64\x3d\xd9\x37\xfb\x32\x6a\xe2\x45\xe7\xb9\x37\x47\xf3\x69\xe2\x25\xd7\x33\x6f\xea\xb3\x69\xe2\x65\x36\xf3\x6e\xea\x73\x69\x5a\x49\xfd\xf1\x95\x4e\xea\x2b\x8a\x54\x4d\xce\x4d\x07\x79\x34\x71\xd1\xe4\x69\x39\x34\x69\x19\x34\x49\xf9\x33\x49\xd9\x33\x49\xb9\x33\xa9\x99\x33\x89\x79\x33\x6d\xb2\x66\x5a\xe5\xcc\xb4\xcc\x98\x69\x99\x2f\xd3\x32\x5b\xa6\x65\xae\x4c\xcb\x4c\x99\x2e\xf2\x64\x5a\x67\xc9\x24\xe6\xc8\xa4\x64\xc8\x74\x92\x1f\x73\x92\xec\x98\x53\xe4\xc6\x74\x9f\x19\x73\x82\xbc\x98\x13\x65\xc5\x34\xca\x89\x29\x53\xf6\x21\x6f\x16\x6e\x33\x62
\x9a\xe5\xc3\x44\xdd\x29\x9e\x21\x1b\xe6\x14\xb9\x30\xa7\xca\x84\x39\x41\x1e\x4c\xa3\x2c\x98\xa4\xe4\x92\x06\x39\x30\xab\xbc\x96\x08\xb9\xe3\x19\x30\x8b\xac\x96\xc8\xd9\xd0\x9b\xff\xb2\x95\xd3\x12\x7b\xc3\xd2\x64\xbf\x6c\x67\xb4\x44\xa9\xf1\xfb\x73\x5f\x3a\x49\x22\xd5\xca\x7c\x69\x9b\xcd\xd2\x32\x97\xa5\x5d\x26\x4b\x74\x1e\x4b\xdb\x94\x90\x84\xf2\xf4\x0c\x96\x2e\xb4\xbc\xe4\xec\x95\xd3\xeb\x78\xad\xb2\x55\x1a\xe6\xaa\x34\xe3\x99\x8a\xc7\x2c\x53\x25\x5e\x2b\x35\x92\xa7\xb2\x9b\x7b\x12\x65\xdf\xe9\x66\xa9\xd4\x65\x9e\xc4\xad\xec\xda\x1c\x95\xda\xbc\x93\x08\xb9\x9d\x0c\x95\xfa\xac\x93\x58\x75\x5f\x33\x3f\x65\x5f\xce\x49\x84\xd4\x46\x76\xca\x91\x8c\x93\xd4\x9e\xea\xcf\x37\x89\x33\xcf\xf6\xeb\xb5\x5b\xda\x6a\x8c\xa6\xbf\x47\xaf\xdd\xd6\x54\x23\xf7\x5f\x83\x4c\x93\x28\x5d\x3c\x8b\x36\xdc\x79\x9e\x49\xa3\x2c\x93\x14\xab\xae\xeb\x1c\x93\xd3\x64\x98\x74\x9f\x5f\x72\xba\xec\x92\x53\xe4\x96\x74\x96\x59\x12\xab\x09\x24\x64\x95\xa4\xe5\x94\x24\x65\x94\xb4\xc9\x27\x69\x91\x4d\x92\x92\x4b\x92\x92\x49\x92\x94\x47\x92\x96\x45\x92\x92\x43\x92\x98\x41\xd2\x22\x7f\xa4\x4d\xf6\x48\xbb\xdc\x91\x76\x99\x23\xed\xf2\x46\xda\x65\x8d\xb4\xcb\x19\x69\x9f\x31\x92\x98\x2f\xd2\x2c\xb6\x63\xf1\xa0\xaf\x24\xf8\x58\x8f\xc5\xd3\xe3\xe4\x2c\xce\x37\xbf\x55\x4e\x66\x79\xed\xcd\xd3\x06\x3e\xfc\xf0\x52\x15\x31\xcc\x0b\x98\xb3\xdb\xc1\x9a\x47\x99\x74\xc9\xab\x39\xa6\x17\x7a\x9a\x32\x08\xa1\xe9\xc1\x5d\x6c\x5d\x65\xb4\xc1\x2c\x36\x0f\x0e\xb2\x0f\x56\x9f\xdb\x57\xaa\xe7\xf1\xa9\xbd\xd2\xbc\x19\x27\xc7\xed\x45\x92\x5c\x9f\xe6\xb8\x62\xd1\x67\x31\x8a\xee\x98\xf4\xed\xa9\xbb\xb4\x4a\x89\x53\x79\xae\xa2\x70\x2d\xf5\xb8\xd0\x06\x60\xd0\xad\x96\x80\xe9\x7b\x0f\x4b\x7a\xa9\xa7\xe9\xea\xf5\x5e\x5e\xf9\xa1\x08\xe2\x94\xe4\xdd\x9b\xc5\x90\x72\x83\xd0\xa0\xf5\xa7\xa3\x80\xdf\x33\x09\x35\x27\xf1\x61\x53\x8f\xcf\x8a\x51\x1c\x77\x3c\x0f\xf4\xd8\x17\xc6\x6e\xb2\xb8\x3a\x82\x37\x8b\xe5\x72\x71\x31\x9d\x99\x9a\xca\x8a\x15\xe5\x97\x8b\x2b\xc4\xaf\xd5\xd7\x18\x16\x88\x0e\xbb\x6a\x1c\x61\x27\xd3\x50\xb0\x77\xdd\x33\x1e\x00\x5e
\xf7\xb4\xe6\xe0\x65\xbd\xc6\xb4\x61\x2e\x46\xf9\x8e\x45\x4f\xcd\x31\x1a\x2b\x1e\x11\xa7\x3f\xb0\x1c\xd4\xf9\x04\xbb\xe0\xd9\xf6\xb9\xfd\xef\xaf\xeb\xaf\xfe\x16\xd7\xb3\x46\x05\x2d\xd6\xb4\xdb\xbc\x91\x3f\xed\xa4\xa4\xdb\x9a\x49\x0b\xd7\xd7\x52\x3d\xd6\x74\x5a\x55\xba\x21\x15\x03\x59\x1a\x83\xe3\xf5\x61\x85\x35\x33\x2a\x0b\xa3\x0a\xc0\x2d\x03\x1f\xdf\xf1\xba\xaf\xec\x9e\x25\x58\x89\xad\x71\x9d\xfb\xae\xd7\x75\x32\x28\xc8\x56\x3d\xc5\x9a\x7b\x31\x5f\xbe\x5f\x9c\x36\x39\xdb\xfc\xef\xa4\x2b\x17\x42\x64\x51\x6c\xf6\x4b\x25\xe2\xec\xb2\xfc\xf3\xcd\x87\x8b\xf2\xcf\xe2\xce\x7c\xb3\x9c\x6e\x8d\xee\x7e\x15\xbe\x30\xf2\xfe\x5b\x7d\x3d\xa1\x10\xea\x4a\x5f\xb5\x27\xbe\x90\x6a\x78\x83\x72\x0a\xb6\x60\xc9\xd3\xec\xb9\x79\xb5\x1b\xcd\x9b\xbf\xe6\xd9\x9a\x5c\x9f\xa6\x55\x66\xbf\x0f\x9f\x80\xe5\xed\xda\x70\x93\x60\xf7\xa6\xa7\x75\x80\xdb\x5a\xc1\xfa\xda\xe7\xa1\xb2\xda\xe3\xa1\xb2\xd1\xc7\x43\x65\x1e\x2a\xf3\x50\xd9\xd0\x8b\xff\x2d\xa1\x32\xdd\x5e\xe6\xe9\x32\x4f\x97\x79\xba\xcc\xd3\x65\x9e\x2e\xf3\x74\x99\xa7\xcb\x3c\x5d\xe6\xe9\xb2\x6e\x29\x4f\x97\x79\xba\xcc\xd3\x65\x9e\x2e\x1b\x7a\x3c\x5d\xe6\xe9\x32\x4f\x97\x79\xba\xcc\xd3\x65\x9e\x2e\xcb\x1f\x4f\x97\x79\xba\x0c\x23\x67\x7a\x65\xaf\xc7\xcc\x3c\x66\xe6\x31\x33\x8f\x99\x79\xcc\xcc\x63\x66\x1e\x33\xf3\x98\x59\x7f\x51\x8f\x99\x79\xcc\x8c\x5c\xda\x63\x66\x03\x4f\x3b\xdd\xc3\xb5\x38\x92\x3c\x6f\xce\x07\xf9\x4a\x73\x21\x6e\xef\x25\x66\x2f\x6f\x5c\x36\xae\x34\x69\x1f\x18\xc8\xb3\x5c\x99\xc2\x2f\x7b\x34\xb9\x76\x74\xa9\x26\x03\x75\x14\xce\x6f\x72\xcd\xa3\x78\x17\x44\x2c\x33\xcb\x02\x67\xef\x31\x7b\x5a\xbd\x10\x57\x7c\x82\x7e\xc5\xd6\xa5\xa3\x35\xaf\xb5\x5e\x71\x30\x94\xfd\x6c\x76\x0c\x30\x4f\xef\x01\x98\x14\x1f\x40\x73\x9e\xc5\xba\x4e\x03\x65\x30\x80\x7d\xca\x0f\x78\xbe\x41\x41\xa9\x1a\x26\x15\x08\xf6\x22\x6b\x96\x0e\x04\x75\xf6\x56\x89\x43\xc6\x52\x82\xe0\xef\x47\xe6\xc9\x43\x80\x34\x5c\xc1\xc6\xdf\x1b\xe8\x1a\x3f\x67\x7e\xdf\x30\xa1\xef\x37\x18\xfa\x7f\x23\x45\x96\x19\xc4\x47\x7c\xc0\x91\x62\x27\xf0\x18\x07\x43\xaf\x71\xa4\xc8\x02\x91
\x1a\xf7\x1c\x47\x0a\xee\xfa\x25\xea\xbc\xc7\x91\x42\x8b\x7b\xae\x63\x0f\x72\x98\xd0\x8b\x1c\x4c\x3d\xc9\x91\x32\xab\x55\x73\xc0\x9b\x1c\x3b\x6c\xeb\xbe\xe7\xbd\x1e\xe5\xd8\x41\xdb\x55\xaf\x58\xda\x61\xec\x3c\xcb\xc1\xe1\x4a\x69\xe1\x61\x0e\x5f\x65\xcf\xb7\x74\x36\x07\x53\x87\x73\xec\x04\x89\x5b\x98\xa6\xd6\xe9\x1c\xbd\x5d\x74\x0d\x18\x1d\xc7\x73\xa4\xcc\xba\x9b\x7a\x8f\xf3\x39\x69\xab\x68\xba\xaa\x6b\x1c\xd0\x49\xab\x59\xcb\x5d\xbd\xcf\x09\x1d\x2d\x5b\xe7\xb2\xde\x5a\xf0\xf1\x03\x60\x5c\xab\x8f\x94\x39\xe2\xba\x8e\xf4\xb1\x11\xcf\x14\xee\xeb\x60\xec\xc2\x8e\x1e\xfe\xee\xdd\xd8\x61\x1a\x57\x76\x78\x06\x77\x76\x30\x75\x69\x47\xef\xc3\x4e\x8d\x11\x30\x89\x41\x02\x74\x46\x89\xa3\x61\xf7\x76\xa4\x78\xad\x33\x7c\x9f\x8b\x3b\x76\xd5\x1d\xb1\x50\x50\x00\x7c\xa8\x41\xf8\x75\xa7\xf8\x5e\x57\x77\xec\x9c\xd6\x3b\xc6\xb7\xdc\xdd\x69\x42\x7b\x9d\xe3\x75\x2e\xef\xb4\x57\x34\x1d\xe4\xf5\x6e\xef\x34\xc9\x4d\x27\x79\xbd\xeb\xbb\xb5\xe4\x57\x3f\x76\x5d\xfd\x2b\xf7\x77\x6a\x57\x92\x9d\xe5\x01\x6d\x1d\x03\xb2\xd3\x3c\x90\x1d\xe7\x81\xea\x3c\x0f\x54\x07\x7a\xa0\x3a\xd1\x83\x85\x23\x3d\xd0\x9d\xe9\xbb\x45\x71\x96\x1e\x4d\x79\x9c\xa1\x09\xec\x1d\xeb\xc1\xde\xb9\xbe\x57\x84\xb9\xed\xa7\x4f\x04\xc2\xf8\xd4\x27\x02\x61\x81\x02\x47\xce\xf6\xe0\xc2\xe1\x1e\xe8\x4e\xf7\x40\x74\xbc\x07\x57\xce\xf7\x30\x95\x03\x3e\x4c\xe4\x84\x0f\x66\x8e\xf8\xe8\xb3\x71\xd7\x3f\xb6\xed\x8c\x8f\xbd\x1f\x2a\xd7\xfd\x71\x87\x7c\xec\x99\xaa\x74\xdf\xef\x77\xca\xa7\x5d\x65\x4b\x17\x7e\x8d\x63\x3e\x52\xa2\xce\x8d\xbf\xcf\x39\x1f\xdb\x55\x2d\x57\x7e\xbd\x83\x3e\xa9\xb3\x86\xdc\xf9\xf1\x4a\x39\xe7\x2e\xfd\x60\xe6\xd6\x8f\xbd\xbf\x95\x97\xcd\x41\xd7\x7e\xec\x55\xa8\x04\x01\xc6\xdd\xfb\x91\xa2\x9b\x7e\xf6\x7d\x2e\xfe\xd8\xce\xd2\x00\x01\x5d\xc1\xf8\xfb\xa0\x16\x0a\x68\x2f\x37\x48\xb1\x43\x60\x40\xe9\xee\x6f\x31\xab\x34\x70\x00\xc5\xdf\x55\x3c\x53\x01\x02\xe0\x1e\x12\x00\x7b\x50\x00\xac\x61\x01\xa0\x00\x03\x60\xa3\x18\xb7\x03\x07\xc0\x0a\x1e\xb0\xaa\xb8\x2b\x88\x00\xbe\x8a\x3e\xdf\x8a\x27\x00\x73\xa6\x80\xa0\xcf\x1f\xe3\x0a
\x6c\xf6\xeb\x1e\xb6\x00\x29\xb2\x8f\x44\x68\xf1\x05\x68\xc3\x67\x0f\x8d\xd0\xcb\x18\x90\x8c\x25\x4d\x22\xa1\xcb\x19\xa0\xd5\x98\x1a\x2a\xa1\xc5\x1a\x50\x0e\x6f\x3d\x64\x42\x93\x37\xc0\x6e\xb2\x83\x74\x42\xb3\xf7\xa8\xa7\xe2\x36\xa1\x50\xb7\x50\x90\x8e\xc5\xfd\x94\x02\xe0\xcd\x65\x8e\x49\x05\x30\xa3\x15\x08\x1e\x00\x23\xc4\x82\x03\xfb\x4b\xd7\xfe\x40\xbb\xb5\x54\xf6\x07\x0d\xb9\x80\x9f\xa1\x8e\x23\xe6\xc0\x24\x51\x73\x00\x17\x39\x07\x3b\x4d\x87\x98\x87\x1e\x92\xc1\xd6\xa6\x61\x1b\x3c\x07\x5a\x01\x74\x26\xd7\xf0\x12\x18\x08\x20\x73\x10\x40\x65\x21\xc0\x92\x87\x00\x3b\x26\x02\x88\x5c\x04\x10\xd9\x08\xa0\xf2\x11\x40\x66\x24\x80\xc8\x49\x00\x9d\x95\xe8\x54\x96\xa2\x47\xa6\x33\x13\x60\xcd\x4d\x80\x35\x3b\xd1\x27\x01\xab\x41\xb6\x61\x28\x7a\x24\xa0\xb5\xd8\xb6\x2c\x85\xa6\x1e\xe6\x69\x7b\x00\x99\xba\x07\x1c\x31\xdf\x88\x54\x2b\xf0\x2d\xa5\xf1\x81\x69\x52\xf9\x80\x4d\x3a\x9f\xe8\xb0\x13\x87\xdf\xe7\xa2\x26\x2e\x8b\xd7\xe1\x0a\x4f\x70\xb5\xc6\xd5\xa4\x63\xe6\x28\xdb\xcd\xf3\x12\xa6\x85\xbe\x71\x5e\xa2\x1c\x10\x9e\x96\xf0\xb4\x44\xe7\xf1\xb4\x84\xa7\x25\x3c\x2d\xe1\x69\x09\x4f\x4b\x8c\x3d\xff\x3a\xb4\x84\x6e\xc7\xf7\xac\x84\x67\x25\x3c\x2b\xe1\x59\x09\xcf\x4a\x78\x56\xc2\xb3\x12\x9e\x95\xe8\x7d\x85\x67\x25\x3a\xe2\x3d\x2b\x61\x56\xd0\xb3\x12\x98\xf2\x9e\x95\xf0\xac\x44\xf5\x78\x56\x42\xf3\x78\x56\xc2\xb3\x12\x9e\x95\xf0\xac\x84\x67\x25\x3c\x2b\xe1\x59\x09\xe3\xef\xf2\xac\x04\xaa\xda\xdf\x38\x2b\xa1\xd3\xe6\x7b\x52\xc2\x93\x12\x9e\x94\xf0\xa4\x84\x27\x25\x3c\x29\xe1\x49\x09\x4f\x4a\x98\x97\xf4\xa4\x84\x61\x39\x4f\x4a\x98\x16\xf7\xa4\x84\x27\x25\x3c\x29\x61\xfc\x7c\x6b\xa4\xc4\x3e\x58\x7d\x6e\x5f\x3c\x9f\x07\x9a\xb8\xd2\xbc\x19\x27\x67\x82\xeb\x36\xb9\x52\xcd\x41\xc6\xa2\xcf\x62\x48\xdd\x31\xe9\xda\xd6\xc9\x13\x62\x76\xc2\xc0\x3b\x92\xaf\x83\x84\xaf\x28\xcc\x83\xdd\x7c\x3c\x2d\x5e\x8b\x17\xf0\x4c\x5d\x58\xd6\x90\x9e\xa4\xf2\xc5\x7b\x99\x8e\x12\x0e\x29\xcf\xf1\x07\xd5\xc7\x59\x0c\x29\x0f\x31\xc4\x08\x74\xb3\x44\xef\x59\x2a\xfe\x94\xc4\x87\xcd\x56\x4a\xc7\x8c\x13\xf5\x14\xbe\x30
\xa5\x77\xcb\xe9\xd9\xf5\x5c\xed\xaf\x1f\x2e\x6f\xae\xe6\x6f\xcf\xde\x9d\xcd\x4f\x31\x6b\xca\x72\x71\x75\x04\x6f\x16\xcb\xe5\xe2\xc2\xdc\xe5\x01\x9b\x38\x4d\x5b\x4b\x44\xf9\xe5\xe2\x0a\xf1\x6b\xf5\x35\x86\x05\xa2\xc3\xae\x1a\x47\xd8\x19\x15\x44\x19\xdf\x20\x36\x29\x71\x6f\x64\x99\x2c\xf7\xe7\x3f\x51\x27\xe2\x65\xbd\xc6\xb4\x61\x2e\x46\xf9\x8e\x45\x4f\xcd\x31\x2a\xd5\x20\x28\x85\xa0\xb8\x0d\xdb\x0d\xea\x7c\x82\x5d\xf0\x6c\x8b\xe1\x61\x5c\x2c\x68\xd7\xf5\x57\x7f\xb3\x8b\x5a\xa3\x96\x16\x0b\xdb\x6d\xde\xd2\x9f\x76\x52\xd2\x6d\x2d\xf9\x3a\xae\xc3\xa5\xee\xaf\xa9\xb2\x55\xca\x2e\x1e\x65\xe1\x93\xf2\x04\x88\xd7\x87\x15\x36\x21\xbe\x32\x23\x3f\x6e\x83\xd5\xb6\xc4\x56\x6a\x4e\xe3\x7b\x9c\xbd\xae\x6d\xac\xcf\xe2\x02\xfd\xa8\x2b\x95\x08\xae\x85\xc5\xc2\x7b\x31\x5f\xbe\x5f\x9c\x36\x56\xdd\xe2\xef\xa4\x77\x21\x42\x64\x51\x6c\xf6\x4b\x25\xe2\xec\xb2\xfc\xb3\xf4\x2c\xcc\xff\x2c\xae\xe6\x37\xcb\xe9\x16\xea\xee\x57\xe1\x0b\x23\x13\x34\x56\x5f\x4f\x28\x84\xca\x39\x59\xb5\x27\xbe\x90\x6a\x78\x83\x72\xa9\x26\x89\xf6\xf3\x1c\xb5\x75\xe9\xbb\xbf\xfa\x51\x9b\x5c\xa9\xa6\xf5\x6a\xbf\x0f\x9f\x80\xe5\x8d\x5b\x77\x8c\x01\x76\x6f\x7e\x4b\x2c\x59\x67\xb1\xf8\x89\x35\xe5\x90\x99\x2a\x02\x09\x70\x32\x95\xf5\x74\xb1\xb7\x51\x19\xcf\xa9\xf8\x4e\x13\xb6\x53\xae\xf6\xb8\xbd\x68\x8c\xeb\x9c\x82\xd4\x34\xa1\x34\xe5\x06\x86\x90\x69\x44\x68\x22\x73\x6f\x8f\xd3\x99\x94\x0c\xd9\x6e\xc9\xcc\xa9\xa8\x4c\x23\x22\x93\x64\x0a\x18\xa0\x31\x9b\x7c\x25\xf2\xac\xa2\x23\x31\x35\xac\x08\xb9\x0d\x90\xae\x2e\x36\x04\xa6\x8b\xf5\xcb\x82\xbc\x7c\xee\xdd\xcc\x92\xba\x34\x22\x2e\x95\x63\x00\xa2\xf3\xc7\x68\x4b\x8a\xab\xc6\x10\x69\xd9\x60\x27\x31\x7e\x41\x5a\xca\x52\xcb\x4d\x22\xa4\x76\x09\x4b\x3d\x33\x89\x5a\xf5\xb5\x74\x65\x0f\x2f\x89\x10\xdc\x05\xe9\xfb\x59\x49\x8c\xd8\xb6\x1f\x82\xde\xb3\x00\xb9\x4a\x4d\xc1\x48\x9a\xf1\x91\x80\x19\x55\xee\xd9\xc8\x09\xb8\xc8\xa9\x99\x48\x23\x1e\x92\xe6\xb2\xe0\xde\x5d\x01\xe1\xaa\x80\x35\x98\xf4\x32\x90\x7a\xaa\x11\x21\xbd\x87\x7f\xb4\xf4\x53\xd0\xb2\x8f\x5a\x9a\x11\x33\xc9\x7a\xb8\xc7
\x3e\x92\x11\x2f\x3a\x67\x1e\x47\x29\x46\xbc\xe4\x3a\xef\xa8\x27\x18\xf1\x32\x9b\xac\xa3\x9e\x5e\xb4\x92\xfa\xe3\x2b\x9d\xd4\x57\x14\xa9\x1a\xc6\xd1\x01\xb5\x88\xd5\x0c\x51\x68\x45\x1a\xa9\x48\xa2\x14\x49\x84\x22\x89\x4e\xa4\x92\x89\x44\x2a\xd1\x86\x48\xb4\xa2\x11\x2d\x49\x44\x4b\x0a\xd1\x92\x40\xb4\xa4\x0f\x2d\xc9\x43\x17\xd4\xa1\x35\x71\x48\xa4\x0d\x29\xa4\xa1\x13\xca\x70\x12\xc2\x70\x0a\xba\xd0\x80\x2c\x44\x5e\x2e\xc6\xa8\xc2\x82\x13\xc4\x5c\xac\xc6\x88\xc2\x1a\x23\x88\x10\xdb\x4b\x13\x76\xf8\x40\xe4\xcd\xa2\x49\x12\x6a\xd9\x40\x84\xc4\x1e\x8a\xb0\xc3\x05\x62\x0e\x7b\x3a\x82\xd0\x80\x09\xc4\x9d\xfc\x9c\xf3\x80\x06\x2c\x60\x75\x5d\x42\xc8\x1d\xe4\x00\x6b\x64\x1f\xe6\xca\x3a\xc6\x00\x36\x41\x39\xdc\x55\x48\xcb\xff\x69\x89\x3e\x84\xdc\xae\xd0\x3e\x9a\xcf\xc2\x72\x39\x44\xf2\x61\x6f\x58\x3a\x84\xb9\x45\xf1\xa1\x94\xfa\x39\x4b\x32\x15\xc1\xe7\x98\xde\xb3\x24\xf7\xec\xa8\x3d\x34\xb1\x47\x84\xde\x6c\x48\x3d\x3a\xa5\xe7\x42\xe7\x4b\xa6\xf3\x9e\x5b\xe3\x6b\x45\xe6\x19\x52\x79\x80\x9a\x89\xa3\x44\x1e\x91\x24\xee\xa3\xf1\x7a\xf9\x3a\x94\xed\xa7\x4e\xe2\xf5\xb3\x75\xb8\x75\x5e\x4b\xe1\x69\xb9\x3a\x84\xdc\x0e\x81\xa7\x67\xea\xb0\xca\xbf\x92\xbe\x1b\xe4\xe9\x10\x52\x1b\xe4\xdd\x08\x4b\x47\xed\xa9\x7e\x8e\x0e\x67\xba\x1d\x8f\x08\x98\xeb\xae\x31\x7a\xff\xbe\x68\x80\x74\x7e\xce\x80\x9d\x43\xda\x65\x47\xb8\x39\xaa\x86\xba\x97\x99\xeb\x50\x70\x48\x8b\x6f\x8b\x97\xd3\xab\x93\x11\x32\x3b\x8a\x67\x57\x47\x05\xc7\x61\xf5\x3a\x8a\xe7\x61\xea\x0d\xa9\x95\xee\x0b\xa7\x47\x8d\xa4\x37\x16\x45\x6f\x32\xbd\x20\x81\x72\xa3\x11\x6e\x24\xba\xcd\x86\x6c\xb3\xa0\xda\x28\x44\x1b\x85\x66\x23\x91\x6c\x34\x8a\x8d\x42\xb0\x11\xe9\x35\x0b\x72\xcd\x86\x5a\xb3\x23\xd6\xec\x68\x35\x3b\x52\xcd\x8e\x52\xb3\x23\xd4\xec\xe9\x34\x12\x99\x96\x35\xcf\xee\xe7\x2c\xda\x1c\xd8\x86\x8f\xaf\x5c\xa8\x9b\x4a\xeb\x86\xb2\xd4\xbf\x74\x54\x4c\xf3\xcc\xaf\xb0\xad\xd2\x45\xe3\x9e\x67\x4d\x87\xea\xb1\xb5\xfa\x10\x05\xd9\xe2\x81\x27\x49\xb0\x9e\xf8\x7b\x3f\xd4\xde\x84\xfb\x48\x71\x98\x13\xf5\x14\xc7\x12\x79\xf0\xc8\x95\x3b\xea
\x20\x21\x3f\xd9\xe8\xc0\xd8\x56\x29\x44\x71\xa4\xee\xee\xb9\x32\x40\xbe\x43\x7a\x8b\xc7\xa6\xf5\x14\xfb\xef\x93\x2a\x28\x95\x95\x6c\x25\xbd\xcb\x23\x09\x6e\xa9\x8a\xe5\x9e\x72\xe2\x23\x14\xb1\x61\x20\x34\x48\x2b\x25\x0d\x53\x7f\xfe\xed\x56\xbc\xe5\xd6\x1c\x36\x4c\xf8\xfd\xa7\x87\x1f\x5e\x26\x3c\xcd\x5e\x3e\xfc\xf0\xb2\x40\xfd\x4e\xd4\x31\xf1\x34\x6f\xe0\xd8\x8c\x45\xcc\xf5\x24\x11\xdc\x5e\xb4\x8a\xf7\x86\xff\xcd\xf8\x97\xac\x7f\x44\x19\xf9\x27\xb7\x67\x0c\xff\x62\xf0\x5b\xb3\x7b\xfb\xa0\xac\xd6\x14\x4b\xd8\x63\x3e\xe6\xc5\xb1\x7a\xc7\x92\xcf\xeb\xf8\x31\x82\x75\x90\xee\x43\xa6\xd4\xe1\xfc\x4b\x26\x8e\x71\x62\x60\x0e\x06\x98\x51\xb5\x5c\xc5\xd1\x7d\x18\xac\xb2\x01\x15\xce\x31\x7c\x79\x7a\xbb\x65\x49\x7f\x25\x8f\x21\x2d\x74\x0f\x03\xbf\xb9\x0b\x59\xf4\x79\xe0\xdf\xc3\x78\x93\x5e\xb1\x88\xf7\x2b\x15\xcd\x9c\x95\xf3\x2f\x1f\x5e\x42\x8c\x97\x8f\xb6\x03\x9f\x12\x3e\x58\xa4\xb3\x62\x88\x3e\x29\xaa\x95\xdf\xd9\xf3\x1e\x1b\x3e\xc1\xe6\x58\xd5\x24\x1f\xf2\x6e\x7c\x01\x20\x8c\x63\x25\x75\x54\x71\xa4\x61\x13\xdb\xcd\x14\xa4\xf9\xe7\x67\x88\x58\xd9\x85\x37\xc9\xbb\xc5\xf5\xc5\x6c\xd9\x82\x58\x66\xd7\x3f\x9f\x2e\x3e\x5e\x1e\xc1\xf5\xec\xe3\xb0\x3f\xc1\xf8\x5d\xe1\x58\xf3\x8a\x91\x02\xc5\xfb\x47\x7e\x76\x3d\xfb\xd8\xb7\x8a\x05\x59\x38\xb0\x31\x1a\x0d\x84\xce\xc6\x9f\x0d\xb4\x69\xa3\x97\x16\xf2\xbf\x2c\x54\xdb\x87\xac\x4b\x71\x37\x7f\x94\xdd\xdf\x37\x90\xf3\xc5\xc3\xed\xfa\xfb\xcb\xc8\x8a\x84\x1c\xba\x63\xe2\x5a\xab\xf0\x4a\xfc\xb8\x65\xbc\x1a\xc9\xb3\x85\x58\x6d\x4d\xd6\xd2\x6c\x68\xcf\x70\xb1\xd4\x8e\xe7\xc3\x3b\x96\x5f\x7c\x33\x64\xbe\x35\x5c\xaf\x45\x6b\xaa\xe1\x35\x62\x48\x30\x26\x89\xda\x8b\x76\xed\x0d\x2e\x57\xbc\x7c\xd8\x18\x8b\x6f\x0c\xa3\x53\xb5\x03\x40\xac\x0a\x4a\x7d\xa9\x98\x4d\xb2\x39\x86\xcf\xca\xe6\xd0\xce\x2e\x9e\xfa\x2c\x7d\x11\x1b\x9c\x4d\xed\xdb\x55\xbc\xc6\xc8\x1e\xd1\xf5\x3b\x57\xb3\x55\xb4\x84\xc6\xef\xf0\x62\x71\x3a\xc7\x22\xe6\x6f\x17\xe7\x8b\xeb\x23\xf8\xe5\xd3\xf5\xec\xd7\x23\xb8\x59\xce\x96\x37\xff\x3f\x7b\xef\xf7\xdc\x36\x8e\xe5\xf1\xbe\xdf\xbf\xe2\x3c
\x6c\x55\x66\xaa\x64\x27\xbd\x3d\x33\x5b\x3b\x6f\x4a\xac\xee\xb8\xc6\xb6\x7c\x6d\x65\xba\x53\x5d\x5b\x31\x2d\xc1\x16\x6f\x28\x52\x4b\x52\x76\x3c\x5b\xfb\xbf\xdf\x02\xc0\x5f\x20\x41\x12\xe7\x00\x94\xd3\xdb\xc0\xcb\x78\xd2\xc2\x21\x08\x82\x20\x70\x0e\x3e\xdf\x33\x7e\x3e\xcd\xcc\xff\x74\xd2\x69\x90\x41\x15\xd1\x1e\x83\xdf\x89\x06\x1b\xfc\x4e\xdc\xd1\xc0\xef\xca\xf7\xde\xe4\x65\x1d\x3f\x70\xd3\x96\x63\x18\x9b\x54\x60\x58\x14\x45\xec\xca\xaa\xd5\x95\x3c\x10\x1f\x66\x26\x6f\x15\x21\x8c\x8a\x0f\x9d\x1a\x85\x4b\x11\xc4\x24\xed\xcd\x32\xea\x64\xf3\xa4\xa8\x27\x6d\xb7\xc5\xc8\xef\x31\xd0\x61\xc4\x1e\x59\xbc\x59\xb1\xdd\x3e\x0a\x72\x83\xb9\x0c\x1d\xb3\x6d\x8d\xbf\x0b\xe5\x7a\x06\xf5\xd5\x09\x67\x2e\xc2\x8a\xbc\x6a\xb9\x4f\xe3\xf3\x7a\x1c\xec\xf8\x9f\x77\xb5\xa3\xc5\xcc\xaf\xde\x01\x48\xc4\xe8\x2e\x4e\xe6\x84\x19\x64\xdb\xe4\x10\x6d\x44\x38\x03\x13\xa4\x16\xe0\x81\x60\x16\xf7\x49\x54\xc4\xbc\x8a\xb5\x9c\x88\x7f\xdc\xfd\xdb\xff\x44\xc1\x3d\x8b\xbe\xf0\x2e\xf9\x5f\xc3\xe8\x67\x83\x70\x4f\x59\x96\x44\x4f\xac\xe4\xf3\x84\xad\x37\x6f\x32\x39\xe9\x9e\xc2\xf8\x64\xb9\x0b\xe3\x39\x1e\x33\xb5\x7b\xf0\x97\x9d\x6b\x62\x1f\xbe\xba\x44\x8e\x92\x67\x96\xc2\x7d\x72\x90\x98\x4a\x1d\x19\x35\xea\xcd\x07\xfe\xee\xb1\x78\xfd\x52\xac\x0b\xc2\xac\x7a\xf6\x33\x71\x0c\x89\xf1\x76\xb2\x0d\xdc\xbf\x14\x81\x95\x17\xd3\xa7\x2f\x44\x9b\xc2\x38\xdc\x1d\x76\x8d\xf0\xa7\x8c\xd6\x94\x1a\x31\x22\xfa\x6b\x1c\x00\x2b\x56\xc0\xd2\xff\xf7\x53\x92\x02\xfb\x16\xf0\x26\xce\x20\x7c\x68\x40\xac\x19\xec\x0f\xf7\x51\x98\x99\xb9\xc8\x00\x92\x78\xcd\x80\x3d\x71\xa3\x3f\xbc\xe3\x4d\x3e\xe4\x2c\x9b\x15\x11\xc6\x30\xfe\x42\x63\x1c\x1a\xef\x4c\x09\x7d\xd6\xd6\x4f\xe1\x3c\x87\x67\xf1\x83\x38\xc9\x61\x17\x7c\x35\xed\x81\x38\x63\xb5\xd7\x53\x9e\x8e\x0a\x1f\x8b\xc7\x1e\xe4\xe2\x50\x9d\xbc\x46\x05\x0b\x1b\x9c\x77\xdb\x47\x49\xbe\xe2\xa3\x7a\xf2\xd1\x7f\x5d\x5c\x09\x53\x93\xf4\xc9\x29\x2f\x64\x78\xb8\x44\xe7\x29\xa8\xde\x85\xea\x49\xf2\x7e\xe2\x2f\x83\x21\x4d\x57\x2f\xb3\xbb\x0b\xc2\xeb\x8b\xe5\xea\xcb\xea\xf3\x35\x7a\x55\x08\x70\x71
\x7e\xb5\x10\xeb\xc1\x0f\xff\x58\x9c\x7d\x99\xdf\x2c\xe6\xf5\xff\x7b\x3f\xbf\x99\xc1\xc7\xc5\x7c\x75\x39\xbf\x36\x81\x19\x4c\xc3\x95\x27\xfa\x06\x1b\xd5\xe4\xed\x35\xfa\x61\xf3\x96\x50\x15\xde\xcf\x4d\x62\x8b\x27\x65\xc7\x8c\x6f\x54\xd4\x65\x86\xf9\x6b\x61\xac\x3e\x31\x1c\x19\x99\xfc\xed\xc0\x5f\xaf\x67\x29\x5c\x9c\x2c\xe5\x1f\x0f\x31\x2d\x0b\xd7\x30\x72\x3e\x17\x2f\xd9\x43\x9a\xec\xc4\x0b\x73\x9b\x07\xeb\xaf\x9b\x34\x7c\x62\x69\xa1\xf9\x97\xc1\xfc\xfa\xdc\x48\xa6\x0f\x29\x32\x91\x93\x24\xfd\x48\x29\xf0\x7b\x9f\x37\x4a\xa6\xd1\xe9\x43\x27\xb6\x41\x19\x08\xb2\x26\xec\x83\x34\xd8\xb1\x9c\xa5\x99\x1a\x91\x33\x8e\x61\xe2\x0e\x66\x9b\xee\x17\xca\x72\x52\x88\x6a\x19\xfe\x9c\x92\x9b\x3e\xc0\xaa\xd5\xc8\x42\x18\x49\xa0\x51\x2b\x21\x25\x3a\x9a\x74\x30\xd1\x9a\xa4\x8c\xad\xf7\x2f\xb0\x61\x0f\xc1\x21\xca\x67\x85\xf2\xcc\x33\x39\xa1\x4b\xb9\x30\xac\xc9\xd0\x4f\x42\xa1\xb0\x3a\x0f\x5d\xb3\xfe\x28\xbb\x3b\xbe\x5f\xda\xab\xe7\x43\xc5\x64\x58\x23\xd4\x4f\x21\x7b\xc6\x35\x36\xa9\x17\xb3\x18\x54\x81\x32\x6c\xc1\x46\x68\x47\x16\x17\x59\x51\xec\x44\x77\x64\x31\x90\xde\x41\x5a\xac\x33\xbe\x67\x83\x02\x3c\x68\xb3\x4d\xc1\x1e\xbd\x0c\x0f\xda\xe4\xe4\xb2\x3d\xb2\x98\x88\xf7\xa0\x8d\x96\x6a\x75\xfd\x12\x3e\x84\xee\xa8\x24\x7f\x06\x85\x7c\xd0\x86\x6b\xe1\x9f\x3e\x39\x1f\xb4\xc9\x96\xfc\x8f\x46\xd4\x07\x6d\x72\x50\x05\x87\x98\x6d\x0c\x5c\x4b\x01\xc9\x62\x26\x08\x84\x36\x5b\x04\x39\xc6\x65\x81\x08\x2f\x2f\x8b\x5d\xe4\xc1\xb2\x91\x08\x2a\x5a\xe2\x6c\xe6\xb5\x90\x0b\x6a\xd9\x9b\x78\x2d\x61\x29\x16\x24\x8b\x91\x64\x10\x7e\x5c\x24\x2d\xc5\xcd\xb6\x70\x10\x6d\xb2\xd5\x11\x28\x0d\xf9\x20\xe2\x04\xd6\x94\x1b\xea\x11\x11\xc2\x7f\x2c\x35\xa2\x43\x1a\x29\x21\xe2\x5c\xde\x92\x1e\xea\x13\x14\x22\x7d\x38\xbb\x02\x44\xad\x0f\x07\x65\x30\x8c\x03\x1a\x68\xab\x23\x62\x44\xa4\xf1\x30\x2a\x49\x44\x1c\x07\xa7\x03\xc2\x44\xd4\xa1\x25\x85\x8c\xfa\xe4\x89\x28\xdf\x08\xe5\xee\x35\x22\x45\xe4\x9b\xd7\xa3\x23\x52\xaa\x08\x6d\xd4\xb5\xb4\x91\x2c\x46\x02\x47\x68\xab\x8e\xb9\x94\xa2\xa9\xce\xe9\x14\x59\xba\xe2\x48\x6d\xc9
\x23\xab\xa5\x97\xbd\x44\x52\xa3\x99\x83\x42\x49\xf8\x69\x4f\xb7\x78\xeb\x78\xa9\xd1\x66\x9b\xf2\x4a\xbd\xa2\x49\x54\xab\x6d\x91\xa5\x96\x74\x12\xd5\x6c\xaf\xd4\x92\x4e\x40\x89\x7a\x11\x55\x70\x49\x2f\xa3\x44\xb5\xad\xca\x2e\xe9\xc5\x94\x1c\xd8\x7e\xf7\xd7\xae\x7c\x54\x2d\xa9\x44\x7f\xa8\x64\x09\x26\x59\xf0\xa9\xc5\xa8\x72\x4c\xad\xba\xe8\xf4\x57\x44\x69\xa6\x76\x55\x64\xb6\x2e\xa2\x4c\x53\xbb\x2a\x3e\x81\x16\x59\xb2\x49\x57\x19\x9f\xfe\xca\x52\xbe\xa9\x65\x82\x9e\x86\xcb\x5a\xca\x69\xc0\x08\x2e\x11\x96\xb5\xac\x53\xbf\x11\x64\x4a\x2e\x37\x12\x4f\x2d\x4b\x76\xf9\xbd\x88\x72\x4f\xb2\x50\x44\x9f\x64\x71\x22\xfd\x24\xcb\x24\x02\x50\xb2\x4c\x21\x03\x25\x8b\x81\x18\x14\xc5\x43\x33\x2a\x09\x85\x5f\xd8\x4b\x09\xa9\x71\x61\x28\xfc\x36\xa4\x12\x92\xea\x97\x87\xa2\xee\x98\x2a\x39\x29\x8d\x48\x14\xda\xa6\x4e\x54\x4a\x91\x8a\x22\x6c\x41\x7a\xa5\xa5\x0a\xc1\x28\x7c\x6f\x4e\x28\x30\x55\x34\x7a\x4c\x66\x8a\xbc\x69\x1c\x12\x9b\xc2\xaf\xf0\x5b\xe2\x54\x1a\xc9\x29\xc2\x9b\xa5\x91\xa8\x1a\x17\x9e\x42\x5f\x47\xd5\x80\xd2\xc9\x4f\xe1\xbb\xa3\x47\xae\xca\x81\xe9\x3e\xd1\xaa\x6a\x22\xa2\xbe\xbd\x8e\xa4\xab\x64\x31\x10\xb0\xb2\xf4\x50\x6a\x64\xac\x28\xe1\x80\xfe\xef\x4c\x53\xcc\xca\x6a\xec\x12\x5d\xf6\xd6\x12\x58\x4d\x23\x54\x21\x2c\x59\xd0\x72\x58\xb2\x58\x84\x0c\xec\xa4\xb1\x64\xa1\x0b\x64\xc9\xe2\x2e\xe2\x41\x16\xcb\x6a\x59\x9b\x38\xde\x61\x25\x95\x25\x8b\x81\x60\x16\x7a\x24\x48\x81\xad\x01\xd9\x2c\xfc\x8c\xd7\x09\x72\xf4\x8a\x67\xa1\x6d\xf7\x89\x6d\xb5\x24\xb4\xd0\x76\x7b\x25\xb7\x7a\x85\xb4\x88\x41\x25\x55\x78\xab\x2b\xa7\x45\x70\xd2\x6a\xe4\xb7\x5a\xa2\x5a\xb4\xe5\x61\x8f\x08\x97\x2a\xad\x85\xff\x78\x0f\x4a\x71\x21\x05\x22\x0b\x93\x4e\x05\xb9\x64\x79\x9d\xa8\x0f\x51\xf0\x4a\x96\xa1\x24\x14\xf5\x81\x1c\xc2\xab\x11\x8f\x09\x75\x39\x89\x54\x75\xe5\xba\xa8\x6b\xad\x3a\x46\xa3\x11\xed\xa2\x8c\x04\xc7\x39\x23\x64\x71\x9f\x39\xa2\x68\xef\x58\xfe\x08\xf2\xd1\x29\x77\xb2\x5f\xb2\x4c\x21\xfe\x25\x8b\x33\x09\x30\x59\x68\x7e\x69\x82\x1c\x98\x5a\x15\x27\x0a\xa6\xd6\x45\x49\x83\xa9\x55\x29\x02\x61
\xaa\x05\x82\x4c\x98\x6a\x80\xe2\x53\xc7\x4b\x86\xb5\x6a\x52\x62\x00\x14\xf9\x30\xb5\x2e\xc5\x91\x4f\x92\x12\x53\x2b\x5b\x78\xe2\xe9\xb2\x62\xaa\x05\x3b\x3f\xbc\x8d\xc4\x58\xbf\x0d\xbc\xef\xdb\x46\x6e\xac\xd7\x06\xc1\x07\x6f\x2b\x3d\xa6\x6d\x8b\x99\x00\x59\x5d\x1e\x50\xec\x05\x38\x90\x76\x2e\x94\x69\x30\x4c\x00\x0c\xd3\xe0\xbf\xd5\x47\x6c\x8b\xfb\x31\x57\xad\x7a\xfa\xf1\xad\xac\x62\x24\x4b\xd5\xe8\x05\x71\x8a\x78\xc3\xe2\x5c\x1e\x51\x16\xbc\xa5\x58\x67\x95\x1e\xbe\xd2\x63\x84\x3c\xa2\xcc\x3f\xb1\xa5\x76\x16\xff\x5e\x0b\xb0\xc7\xfc\xf3\xb7\xd7\x64\xe1\x3e\x3e\x13\xa1\xcb\x05\x4e\xb1\x36\xc5\x06\xdf\xb2\x6d\xea\x38\x94\x69\xc9\xe1\x3e\x10\x27\x0f\x9b\x18\x02\x2a\x91\x3c\x58\x10\x04\x9b\x30\x65\x6b\x2c\xfa\x02\x4e\xfd\x39\x67\x65\x13\xbe\x27\x47\x8e\xee\x39\x57\x0d\xb5\x3e\xb8\xfa\x51\x3a\x61\x0e\x19\x2b\xf0\x18\x31\x10\x28\x5b\x95\x8c\x45\x6c\x9d\x77\xf3\x90\xec\x83\x8c\xff\x95\x26\x87\x47\x7c\x6c\x47\x50\xfe\x72\x08\x76\xe9\xd7\xb3\xf3\x9b\x85\xfc\x8e\xa3\xe9\xd7\x66\x59\x2d\xaf\x67\xf0\x7e\xb9\x5a\x2d\x2f\x8f\x73\x6c\x44\xdb\x6e\xb4\x95\xd5\x72\x1c\x41\x6d\xd7\x91\x77\x89\xaa\x16\x1f\x76\xf5\xe8\xa3\xbd\x9a\x61\x9c\xb3\x47\xf4\xfe\xb7\xd0\x93\xe3\xb5\xff\xf6\x17\xbb\xf7\xfa\xaa\x79\x0f\x36\xaf\x0b\x7f\x5b\x76\x41\xfc\xa2\x8e\x72\x7c\x08\x2c\x88\x22\xf9\xde\x35\x5f\x8e\xe6\x60\x47\x59\x2c\x5e\xda\x4b\x96\x6f\x5f\x93\xbc\xba\x69\x36\xe3\x7b\x9f\x41\x95\xc6\x5a\xcf\xa2\x77\xc5\x23\xf8\xb2\x13\xf6\xee\x1a\xe1\x5d\xca\x54\x2a\x9c\xa0\xaa\x17\x5b\xfa\xef\x58\x8c\xcb\xe6\x53\x59\xdc\xa7\x89\x48\x36\x54\xe7\x0a\x6e\x28\xa0\x10\x1c\x33\x0d\x42\x61\x1f\x14\xa4\x95\xe5\x3b\x51\x90\x4a\x4d\x5f\x9c\xf5\x49\xd2\xcb\xc5\xea\xe3\xf2\xac\x25\xad\x28\xff\x4d\x1e\x28\x2d\xff\xcf\xfc\x57\xfc\x77\xa3\xac\x7b\x5e\xdb\x11\x07\x4a\x8b\xbf\x2f\xe6\xab\xc5\xed\xea\x38\x5f\x94\xee\x7d\x52\x4d\x90\x7c\x11\x75\x2f\x92\xab\x12\xdc\x2e\x75\x9f\x53\xab\xca\x47\x64\x5c\x3b\xd3\xa4\xa1\x39\xfe\x9e\x44\x97\x0c\xe7\x7b\xd9\x93\x58\xb6\x4d\x8d\x32\xee\xf7\xd1\x0b\x04\x45\xaf\x93\x0f\x47\x05\x0f\x39\x4b
\xe1\xae\x51\xbd\x39\x3b\x7b\x34\xda\xb8\x78\x34\xda\xa3\xd1\x55\xf1\x68\xb4\x47\xa3\x3d\x1a\xdd\x5f\x7e\x57\x68\xb4\xee\xab\xed\x19\x69\xcf\x48\x7b\x46\xda\x33\xd2\x9e\x91\xf6\x8c\xb4\x67\xa4\x3d\x23\xed\x19\x69\xcf\x48\x7b\x46\xda\xb4\xaa\x67\xa4\x3d\x23\xed\x19\x69\x7d\xf1\x8c\xf4\x40\xf1\x8c\xb4\x67\xa4\x3d\x23\x5d\x36\xda\x33\xd2\x9e\x91\x6e\x16\xcf\x48\x7b\x46\x1a\x6f\xc4\x33\xd2\x9e\x91\x36\x0f\x7c\x78\x58\xda\xc3\xd2\x1e\x96\xf6\xb0\xb4\x87\xa5\x3d\x2c\x0d\x1e\x96\xf6\xb0\xf4\x68\x55\x0f\x4b\x13\x6a\x7a\x58\xda\xa8\xb2\x87\xa5\x3d\x2c\x3d\xde\x16\x0c\x2c\xdd\x4e\x57\x77\xc3\x17\x62\xaf\x97\xb3\x4e\x5c\x1e\x6b\x6a\x8a\xbd\x20\xa6\x21\xca\x06\xef\x5a\x93\xb6\x2e\x00\xb1\xbe\x85\x7b\x96\x3f\x33\x66\xee\xa9\xce\x9f\x13\x0d\x7d\x6b\xec\xdd\xa1\x9c\x50\xdf\xb0\x38\xd9\x85\x71\x90\x27\xaf\xc0\x57\x9f\xd5\x17\xa7\x18\x99\x6c\x1c\xd0\xda\xd5\x89\x0b\x35\xfa\xb6\x5c\x3e\x8b\x71\x81\x59\xd8\x60\xd3\x14\x02\x3e\x55\x21\xd8\xc0\x0d\x34\x14\x06\xe8\x43\x08\x5c\xa5\x2e\x84\x63\x0f\x25\x7a\x33\x8d\x52\x1a\x5a\xed\xc3\xb4\x69\x0d\xd1\x16\xdb\x69\x10\xb5\xa9\x0d\xf1\x56\xbb\xa9\x10\xa9\xe9\x0d\xc1\x62\xa8\x83\x3d\xcb\x03\xb6\x9e\x61\xc7\x4c\x0f\x4c\xc4\xf5\xc0\x18\xdb\x43\xb3\x58\xf0\x40\x3d\x7c\x0f\xc9\x66\x83\x09\x3a\x16\xe3\x03\xe3\x9c\x0f\xc9\xe6\x6a\x1b\x66\x53\xb0\x3e\x30\xc6\xfb\x90\x2c\xd6\x8c\x50\x0f\xf3\x43\xb2\xda\xe0\x84\xfa\xb8\x1f\x92\xdd\x9a\x15\x72\x89\xba\x40\x27\x5a\xa6\xe3\x7f\x68\x03\xac\x39\x9f\xb7\x19\x20\xab\x21\xa0\x1e\x11\x56\x39\x20\x92\xe1\x41\x98\x0a\xb1\x78\x6e\x16\x27\x31\x48\x17\x0c\x11\x38\x9f\xf1\xad\x59\x22\x78\xc5\x75\x8f\x13\xac\x08\x26\x43\x8b\x60\x12\xbc\x08\x0c\x10\x23\xf2\xc7\x6b\x00\x33\x22\xdb\xac\x83\x55\x3a\xd4\x88\x36\x81\x76\xf0\xa4\x16\x6e\x44\xec\x55\xa7\x88\x12\x8c\x61\x4a\x34\x8b\xf5\xc7\x52\x17\xab\x22\xd9\x54\xe2\x5b\x43\xf1\x2a\x5a\x8b\x25\xe2\x34\x88\x2c\xd1\x06\x57\xf3\xde\x35\xd8\x12\xad\xb5\x8a\xcb\xa4\x8d\x2e\xd1\x3e\x49\xed\xe8\x99\x55\xc4\x0f\x24\x92\xde\x3f\x56\x49\x51\x34\xd0\x45\xd2\x04\xf6\xd4\x88\xa7\x91\xac\xf6
\xa3\x4f\xd4\x49\x65\x02\xfc\x09\xa6\x42\xa0\x60\x42\x0c\x0a\xc6\x51\x28\x92\xcd\x7b\x36\x09\x0e\x05\xd3\x20\x51\x30\x8a\x45\x51\xe7\x44\x21\x0b\xd7\x8b\x46\x91\xac\x36\x70\xaa\x3e\x3c\xca\xc6\x6e\x81\x54\x8d\x22\x52\x36\xd7\x68\x62\x55\x7a\x4c\xca\xc6\xba\x8a\x56\xe9\x51\x29\x47\xf6\xff\xfa\x4e\x67\xff\x9d\x9d\x7d\x0d\x62\xe5\x0c\x99\x02\x62\x54\x1a\x2c\xd1\x29\xb0\xc4\xa7\xc0\x0e\xa1\x02\x3b\x8c\x0a\xec\x50\x2a\xb0\xc6\xa9\xc0\x16\xa9\xea\x1a\xa0\x44\x4f\x35\x56\x28\x81\x5c\x70\x85\x57\x81\x2b\xc4\xaa\xd7\x10\x36\x9e\xda\x67\x08\x1d\xdc\xed\x33\x84\x8e\xf0\x82\x53\xe4\x0a\xdc\x61\x57\x60\x8b\x5e\x81\x15\x7e\x05\x6e\x11\x2c\x30\xc1\xb0\x48\x5f\x85\x12\xdd\x1a\x42\xb1\xac\xd7\x94\xa7\x7a\x1c\x8b\xe6\x48\x6d\x22\x5c\xed\x23\x7a\xb4\xb5\x99\x06\xe3\x52\xb1\x2c\xaa\x63\xa5\x0f\xe5\x6a\xa0\x59\x24\xd3\xbd\x38\x97\x4d\x37\x0c\x21\x5d\xc2\xdb\x42\x75\x84\xf4\x62\x5d\x0d\x4c\xcb\x76\x7b\xdd\x44\xbb\x1a\xa8\x16\xad\xc9\xd3\xe0\x5d\x30\x09\xe2\x05\x63\x98\x17\x7d\xa7\xc9\xfa\x51\x2f\xda\xe3\x2a\xf1\x30\x3d\xee\x45\xb2\xe9\x1e\x11\x83\x69\x30\x31\x98\x16\x15\x83\x49\x70\x31\x18\x43\xc6\xc8\xe3\x60\xd6\x8b\x8d\xd1\x7a\xb6\xee\x55\x1d\x3a\x66\xef\xbc\x73\xee\x6f\x69\x22\x67\x6a\xf8\x86\x66\x91\x1a\xf2\x71\x84\x9e\x81\x23\xfc\x0c\xe8\x08\x1a\xd8\x87\x9e\x5c\xa0\x68\xe0\x00\x47\x03\xe7\x51\x34\x4b\x2c\x0d\x5e\x31\x86\xe6\x80\x50\x83\x89\x28\x35\x18\x27\xd5\x88\x2e\xfe\x31\x5a\x8d\x1e\xd2\x16\x84\x5b\x2f\xb1\x46\x8e\x21\xd5\x94\x9b\x86\x5a\xa3\xed\x4c\x1c\x93\x6e\x30\x1d\xed\x06\x06\xc4\x1b\xc9\x68\xb9\xf4\xee\xa3\xde\x48\x46\x15\x52\xae\x4b\xbe\xd1\x1e\x56\x8b\x96\xd3\xd2\x6f\x34\x87\xbc\x42\xcc\xe9\xa2\x8a\xc4\xd3\x43\x6c\x28\xaa\xe8\x20\x8c\x66\x4d\xce\xc1\x74\xf4\x1c\x8c\x13\x74\xe4\x68\xfa\x44\x14\x1d\x8c\x92\x74\x3e\x54\xe7\x3a\x54\xa7\x92\x75\xd4\x19\x57\x39\xf0\xa4\xa7\xeb\xdc\x34\x77\x9a\x28\xdd\x51\x63\x20\x64\x3a\x0f\x2c\x09\x3d\xb0\xa3\xf4\xc0\x09\xa9\x07\x2e\x68\x3d\xb0\x22\xf6\xc0\x8a\xda\x03\x3b\x72\x0f\x2c\xe9\x3d\xb0\x22\xf8\xc0\x96\xe2\x03\x17\x24\x5f\xd7\x08\x3d
\x08\x64\x4f\xf4\x81\x23\xaa\xaf\xcf\x0e\x2d\xde\x62\x4f\xf7\xf5\xd8\x21\xc6\x7f\xdc\x50\x7e\x9a\x36\x61\xd3\xa2\x02\x29\x35\x2a\x38\x55\x80\x41\xa7\xa6\x04\xe3\x34\xa9\xe8\x0e\x3d\x56\x5a\x55\x30\x49\xad\x8a\xb6\x58\xa7\x62\xb5\x4d\xaf\x1a\x1f\x76\x7c\x13\xf2\x1a\xcc\xdf\x55\x79\x69\x8a\x89\xc9\x5c\x2d\x94\x56\x75\x02\x90\x55\xaf\x7a\xda\xaf\x53\xfe\x18\xb4\x5f\x35\x8c\x3c\xeb\xe7\x59\x3f\xcf\xfa\x79\xd6\xcf\xb3\x7e\x9e\xf5\xab\x8a\x67\xfd\x3c\xeb\x37\x5c\x7e\x9f\xac\x9f\x6e\xd5\xe3\x49\x3f\x4f\xfa\x79\xd2\xcf\x93\x7e\x9e\xf4\xf3\xa4\x9f\x0f\x1f\x7a\xd2\xcf\x93\x7e\x9e\xf4\x33\xb7\xef\x49\x3f\x4a\x7d\x4f\xfa\x79\xd2\xcf\x93\x7e\x34\x43\x9e\xf4\x53\x8a\x27\xfd\x3c\xe9\xe7\x49\x3f\x4f\xfa\x79\xd2\xcf\x93\x7e\x9e\xf4\xf3\xa4\x9f\x27\xfd\xc6\x0d\x79\xd2\xcf\x93\x7e\x0e\x23\x68\x9e\xf3\xf3\x9c\x9f\xe7\xfc\x3c\xe7\xe7\x39\x3f\xcf\xf9\x79\xce\xcf\x73\x7e\xfd\xcd\xf5\x9c\x9f\xe7\xfc\x54\x23\x9e\xf3\xa3\xd6\xf6\x9c\x5f\xc7\x8e\xe7\xfc\x86\xed\x78\xce\xcf\x73\x7e\xdf\x29\xe7\xb7\x0f\xd7\x5f\xdb\x0e\x87\xe3\x23\x7f\xd7\x9a\x56\x50\xac\x4d\xe6\x7e\xb1\x6c\xa0\x3a\x88\x83\xf8\x2b\xdf\x0d\xdf\x07\xe2\x60\x71\x27\x9b\x23\x66\x5d\x45\xc5\x97\x36\x61\xca\xd6\x74\x4a\xcf\xc5\x6c\x70\x56\x36\x81\x6a\xe6\xa8\x0f\xbb\x6a\x2d\xc1\xc9\xa6\x1e\x50\xff\x28\xdd\x62\x87\x8c\x15\xc0\x9e\x18\x0d\xf8\x49\x25\x81\x8c\x45\x6c\xad\x1c\x20\x95\x53\xcd\x3e\xc8\xf8\x5f\x69\x72\x78\xc4\xfb\x41\x78\x9b\x8a\x71\x58\x9d\xc1\xab\xce\xd3\x9d\x9d\xdf\x2c\xe4\x8a\xe1\xd3\xd5\xed\xf5\xe2\xc3\xf9\x4f\xe7\x8b\x33\xfc\xf4\xb5\x5a\x5e\xcf\xe0\xfd\x72\xb5\x5a\x5e\x62\x8f\x51\xd1\x52\x78\x6b\xdb\x8d\xb6\xb2\x5a\x5e\xa3\xeb\xc8\xbb\x44\x55\x8b\x0f\xbb\x7a\xf4\xd1\xde\xcf\x30\xce\xd9\x23\xfa\x53\xcb\x77\xf9\x41\x2e\x6a\xff\xed\x2f\x76\x2f\xf7\x55\xf3\x1e\x6c\x5e\x17\xfe\xb6\xec\x82\xf8\x45\x1d\xe5\xf8\xc8\x6d\x10\x45\xf2\xbd\x6b\xbe\x1c\xcd\xc1\x8e\xb2\x58\xbc\xb4\x97\x2c\xdf\xe2\xb9\x4f\x77\x53\xe8\x4d\xb3\x19\xbf\x8b\x69\x54\x69\xb1\xf5\x54\x7a\x57\x3c\x87\x2f\x3b\x61\xef\xae\x71\x86\x80\x32\x9f\x0a\x7f\xb2\x1a\x58
\x90\xae\x4f\x16\x53\x62\xf0\x7c\xb0\xa5\x89\x38\x2c\x50\xd3\x39\xcf\xdb\x70\xbd\x2d\xc1\x4c\xb4\xc9\x06\x8a\xb4\x0f\x0a\x98\xd3\xf2\xc5\x28\x60\xc6\xa6\x1b\xb2\x3d\xed\xa3\x6d\x5e\x2e\x56\x1f\x97\x67\xca\x37\xa2\xfc\x37\x71\xfa\xba\xfa\x3f\xf3\x5f\xf1\x1f\x8f\xb2\xee\x79\x6d\xe7\xf6\xd3\x65\xf5\xf7\xc5\x7c\xb5\xb8\x5d\x1d\xe7\xb3\xd2\xbd\x4f\xaa\x09\x82\x4b\xe4\xa4\xd1\x8b\xe4\xaa\x68\x2f\xd0\x49\xa3\xcf\xa9\x55\xe5\x23\x32\xae\x2d\x19\xde\x20\x7d\x99\xd3\xc4\x25\xdc\x6c\x51\x6e\x35\xad\xf8\xae\xb6\x28\x96\x0d\x54\xa3\xc0\xfb\x7d\xf4\x02\x41\xd1\xf5\xcd\xe3\x79\xa8\x67\x1e\x3c\xe4\x2c\xad\x75\x4e\xf8\xf4\xcc\x67\xae\x43\x8e\x73\x1e\x93\xc5\x48\xec\xf4\x19\xdc\x7d\xa7\xed\x74\x19\xa6\xd0\x64\x18\xd4\x63\xa8\xb4\x15\x88\x9e\x8e\xb6\x16\x43\x53\x57\x81\xb2\x70\x3b\x86\x0e\xc3\x88\x06\x83\xd0\x53\x40\x1b\x9d\x40\x7f\x61\x50\x7b\xa1\xa1\xa3\x80\x36\xac\xd7\x5d\x68\x6a\x28\xa0\x4d\xf6\x68\x2e\x34\xf4\x13\xd0\x26\x07\xe5\x01\xca\x83\x62\xf8\x87\xaf\xd7\x5a\xb0\x0b\x76\xb5\x75\x16\xf4\x9a\x09\xf8\xf5\x9a\xa2\xb1\xe0\x52\x80\xa2\x38\x75\x68\xad\x95\x60\xaf\x93\xe0\x6e\xe6\xb5\xd6\x47\x78\x9d\xef\xb6\x13\x6d\x84\x69\x74\x11\xdc\x6b\x22\x8c\xe9\x21\x90\x4e\x9d\x0c\x68\x21\xa8\xba\x06\xf8\x4f\xa7\x46\x07\x41\xa3\x69\x40\x9c\xd9\x4b\x0d\x04\xd7\x7a\x06\x83\x5a\x06\x36\x9b\x66\x9d\x8e\x81\x7a\x7e\x08\x6d\x75\x40\xc3\xa0\xd4\x23\xc0\x7f\x38\x06\xf4\x0b\x6c\x04\xd3\xb4\xda\x05\xc5\xa2\x8a\x3a\xb4\xda\xba\x05\x96\x32\x0e\x2d\xcd\x02\x9d\xfe\x00\xf9\xe6\xc5\x89\xa5\x1e\xed\x01\xb4\xd1\xc6\x99\xa5\x7e\xdd\x01\xc2\xf4\x32\xc5\x51\xa6\x49\x8e\x31\x4d\x75\x84\x69\x44\x67\x80\xe2\x89\x9a\x42\x63\x60\x02\x7d\x81\x61\x6d\x01\xb2\x83\xab\x57\x57\xa0\xa9\x11\x40\xb5\xda\xd6\x14\x68\xe9\x03\x50\xcd\xf6\xea\x09\xe8\xb4\x01\xa8\x17\x51\xb5\x04\xf4\xba\x00\x54\xdb\xaa\x8e\x80\x5e\x13\xc0\x81\xed\x77\x7f\xed\x2a\x23\xd4\x7a\x00\xf4\x87\x6a\xa9\x1f\x40\xf3\x50\xd2\x75\x03\x6c\x34\x03\x2c\xf4\x02\x2c\xb4\x02\x2c\x74\x02\xec\x34\x02\xac\xf4\x01\xec\xb5\x01\x1c\xe8\x02\x38\xd1\x04\x70\xa2\x07\xe0\x44
\x0b\xc0\x89\x0e\x80\x13\x0d\x00\x77\xfc\xbf\x23\xf6\xdf\x8a\xfb\xa7\x33\xff\x0e\x79\xff\x51\xd6\xbf\xe2\xf6\xf1\xfe\xba\x7e\xce\x5f\x65\xf6\xf1\x27\x1e\x74\x8c\xbf\xca\xeb\x93\x4e\x6a\xb4\x00\xd1\x0e\xab\x8f\x5f\xd8\x37\xd9\xfe\x21\x4e\x1f\xbf\x0d\xa9\xb8\xfe\x7e\x46\x9f\xba\x63\x1a\xe2\xf3\xd1\x36\x85\xc7\x60\x88\xcd\x27\x6c\x41\x4a\x96\xbf\x8f\xcb\xc7\xf7\x66\xc5\xf1\x4f\xc5\xe4\xbb\xe7\xf1\x07\x59\xfc\xc2\x3b\x81\x5f\xe1\xf7\x71\xf8\x35\x53\x4f\x78\xb3\x34\x0c\xfe\x04\x3c\xfd\x04\x2c\xfd\x84\x1c\xbd\x7b\x86\x7e\x90\x9f\x97\x2c\x3c\xd5\x8b\xdd\x9d\x1a\xeb\x5e\xb1\xf4\x50\x4a\x6e\xde\x0e\xb4\xeb\xf5\x19\xb4\xf9\x77\xab\xb1\x4b\x16\x3b\x76\xc2\xbd\xbb\x60\xde\x89\xbc\xbb\x15\x20\x6e\xcf\xb9\xdb\x32\xee\xee\xe2\x1f\x96\x6c\xfb\xeb\x44\x3f\x1c\x70\xed\x53\x30\xed\x23\x3c\x3b\xe0\x61\xee\x31\x96\xbd\xc1\xa5\xa3\x6d\xf7\x71\xec\x2d\x26\x1d\x6d\xb7\xcb\xb0\x3b\xe7\xd1\x27\x62\xd1\xc7\x38\x74\xfa\x62\xb1\x87\x41\x57\x79\x72\xfc\xa7\xbc\xcd\x9f\x77\x58\x72\x5a\x8c\xa9\xc5\x9e\xb7\x38\xf2\xdf\x49\x0c\xc8\x8a\x1f\x9f\x88\x1d\x1f\xe1\xc6\x9d\xc5\xad\x1c\x30\xe3\xc3\xbc\xb8\x0c\x98\x50\x46\x42\x6f\x80\xa5\x11\x30\x71\xbb\x58\xb2\x3d\xf0\xa0\x0b\xb0\x94\x5d\x42\x3e\x56\xd5\xe2\xc3\xdb\xac\x37\x21\x8c\xa1\x63\xc3\x35\x9c\x37\xfe\x55\xa8\x62\x2b\x4e\x8e\xbd\xd0\xbc\xd4\x64\xb6\xdb\x86\xeb\xb6\x60\xba\xed\x79\x6e\x6b\x96\x9b\xce\x71\xd3\x19\x6e\x0b\x7e\xdb\x86\xdd\xa6\x73\xdb\x56\xcc\xb6\x35\xaf\x6d\xcf\x6a\xbb\xe0\xb4\x5d\x30\xda\x2e\xf8\x6c\x17\x6c\xb6\x0b\x2e\xdb\x15\x93\x6d\xc1\x63\xe7\xea\xbe\xe8\x22\x88\x1f\x0f\xc1\x23\x33\x9d\x37\x09\xbb\xc3\xd6\xae\x70\xa5\x6f\x80\xa1\x31\x75\x6f\x25\x81\xe3\xea\x40\xd8\x03\xcb\x55\x88\xc6\xec\x0b\x72\x88\xc3\x7c\xf9\xc4\xd2\x34\xdc\x1c\xb1\x1f\x3e\x35\xae\x4a\xb9\x79\xbe\xcc\xe5\x2d\xe7\x0b\x33\xb1\x94\x2a\x5c\x84\x72\x11\x25\xba\x02\xb1\xa0\x6e\x8b\x2a\xc6\x49\x2c\xbd\x2f\xc5\xd9\x56\x71\xa5\xe7\x10\xb1\xec\x4f\x8a\x7b\x83\x20\x7e\x91\xd5\x85\xa3\x3c\x58\x0b\x7e\x28\x16\xc8\x31\xb2\x91\xf2\x84\x31\xbf\x71\x49\x0b\xf2\xc5\x42\xe5\xba\x0b
\xe4\xdf\xbf\xdd\xf1\x6b\xdd\x99\xc3\xf5\x29\x7b\xf8\xf2\xf4\xe3\xdb\x94\x65\xf9\xdb\xa7\x1f\xdf\x96\x38\xfb\xa9\x5c\x40\x9f\x15\x5d\x9e\x60\xd8\xfb\xc2\x03\x16\xc3\xdd\x65\xcb\xc8\xdd\xd0\x88\xcc\xb7\x29\xcb\xb6\xc9\x68\x6c\xcc\x34\x12\xd6\x7e\xf1\x2a\xf3\x83\xb5\x5a\x83\xac\xa8\x03\x51\x18\xf3\x35\x7b\x1a\x3c\xc7\xb0\x4d\xd2\xf0\x5f\x7c\xb4\xf1\xbd\xb1\xf4\x81\x8f\xf4\x0d\x7f\x32\xeb\x6d\x90\x8e\xf8\x12\x08\xae\x3f\xbc\xa3\xcf\xc8\xad\x87\xe0\x76\x68\x7e\x2a\xc3\xa7\x81\x03\x50\xd6\x49\x64\x96\x34\x19\x39\x7d\xb5\x0f\x36\xf3\xcb\x60\xaa\x11\x7b\x46\x5c\xc7\xd0\x01\xd7\x3d\x70\x9c\xe5\x41\xce\x64\x97\x08\x4f\x94\x98\xc7\xaa\x77\xcc\xec\xd3\x20\x5a\x50\xba\xc5\x05\xc6\x2b\xa7\xd7\x00\x8a\xc6\x12\xcf\x73\x7d\x58\x5e\x2c\x6f\x54\x46\xf1\xe7\x9b\xc5\xe7\x19\xbc\xbf\xf8\xb4\x10\x7f\x2f\xae\x66\xf0\x79\x71\x71\xb1\xfc\xc5\x6c\x6f\xb9\xbc\xe1\x4b\x00\x71\x7a\xc8\xe4\x74\x8f\xe9\x5e\xe9\xa4\xdb\x54\xa3\x5a\xfc\x6e\x8c\x7e\xc8\x6f\xd8\xd4\xe2\xc2\x64\x9d\x74\x52\x74\x9b\xd1\x4f\x65\xaf\x19\xfd\xf4\xc6\xe0\xce\x51\xca\x16\x76\xef\x20\x46\xc1\xc2\xf6\x3d\xc4\xea\x4f\x74\xdf\xc5\xaa\x63\x8a\x37\x91\xc1\xfa\x90\x8a\x88\x29\xf2\x85\xac\x9a\x62\xf6\x52\x1a\xd9\x1c\x16\x98\x80\xf9\xfb\xe5\x3f\x17\x33\x78\xbf\xb8\x58\xfe\xe2\xf6\xd5\xa2\x2a\x43\x9c\xc8\x36\x99\xbd\x5f\x0b\x93\x77\x21\x0a\xee\x59\x34\xfd\xa0\xbd\xe0\x97\xc1\x0e\xa0\xb9\x6c\x5d\x35\x72\x10\x23\x46\x3c\x59\xf3\xdb\x8a\x0f\x86\xc7\x21\x4a\xb1\x8a\x4d\x72\x30\x1b\x64\xad\x7e\xf8\x27\x6f\x17\xb6\x1f\x56\xcd\x2c\x9b\x6a\x47\xd4\xff\xcd\x68\xc0\x67\xdb\xe4\x10\x6d\x04\x1c\x28\x23\xbd\x25\xf0\x11\x07\x79\xf8\xc4\x20\x5b\x07\x51\x75\x15\xb9\x0e\x1e\x5c\xb7\x86\x3b\x96\x6d\xc3\x87\xfc\xec\x90\x1a\x00\xd2\xc6\x43\x48\xb3\x71\x54\x2e\x83\x59\xc6\x32\xd8\x14\xb5\x1a\x7c\x68\xb6\x8f\x82\x17\x08\x0a\x45\x83\x30\x1b\x9d\x4c\xe5\x12\x16\xe6\x8d\x1a\xf2\xdf\x20\x0b\x77\x87\x28\x0f\x62\x96\x1c\xb2\xe8\x85\xf7\xf0\x73\x66\x96\x51\xf4\x21\x4d\x76\x90\x3f\x27\xdc\x44\x18\x05\xe9\x49\xc4\xe2\xc7\xbc\xd8\xc6\x4a\x67\x6b\x06\x7f\x62\xa7\x8f
\xa7\x33\x78\x66\xec\xeb\x09\xdf\x58\x9d\xf0\xbf\x46\x0c\xcb\x07\x97\xfd\xf9\x54\xbd\xfd\xd2\x6f\xbb\x4f\xb2\x90\x3f\x6d\x79\x1c\x3f\xcc\x61\x3d\x8a\x44\x26\x71\xd4\x56\xfd\x15\x77\x5f\x50\x23\x62\x17\x2a\x0e\x70\x25\x0f\x70\x71\x7e\xb5\x80\x7d\x34\x4a\x0b\xf1\xd1\x30\x34\xb8\xbe\xcd\xbf\x85\x46\xfb\x21\x83\xb5\x7a\x6b\x40\xfd\xca\x4d\x9b\x55\x40\x7d\x38\xc7\xed\x76\x06\x67\xbd\xc8\x6f\x53\xb5\xbf\x42\xf0\x2d\x1c\x71\x64\x98\x6f\x11\x0c\xe7\x79\xd4\x2c\x4f\x9a\xe3\x3b\x3d\x20\xe7\xf8\x62\xce\x19\xbf\x65\x90\x93\xd4\xb4\xb7\x72\xcb\x2f\x61\x5e\x09\x3f\x48\xc4\x05\x8c\x16\x55\xdd\x25\x15\xef\x23\xd9\x09\xa7\xf0\xfe\x85\xcf\xe4\xc1\x21\xca\x67\x10\x88\xad\x7a\x60\xf2\x19\x93\xf3\x7c\x81\xcc\x6b\x14\xb7\x6e\x3f\xcc\x2f\x16\x58\xb5\x2d\xfe\xe2\xcf\x6f\x66\x70\xb1\xfc\xf9\x87\x77\xe3\xeb\x25\xb3\xd5\xd2\x49\xb7\x2d\x06\x75\x64\x53\x4c\x7e\xc8\xdb\x3a\xf0\xbb\x97\xe9\x26\xa1\xcf\x13\x4d\x42\xe3\x76\x11\x93\xd0\x67\x3f\x09\xe9\xcb\xef\x7c\x12\xfa\xec\x27\x21\x51\xbe\xeb\x49\xe8\x39\xdc\xe4\x5b\x7d\xeb\xc6\x05\xf7\xc6\x65\xf5\x5a\xc3\xed\x17\x7e\x35\xed\x0f\x3b\x6f\x8b\x68\x58\x7d\x2e\x24\x62\x33\xd8\xb1\x20\x3b\xa4\x72\x43\xf1\x98\x86\x1b\xc8\xfe\xfb\x10\xa4\xfa\x40\xcc\xb7\xeb\xa4\x67\xaa\x98\xe0\xb6\x7e\xbd\x4e\xf4\xb3\x61\xe7\xae\xfe\xc5\xd2\xe4\x24\x8c\x37\xec\x1b\xdb\x14\x8b\x64\xc9\xd8\x97\x77\xd9\xbe\xb7\x9e\x47\x9b\xb2\x48\xee\xa6\x8a\x39\x34\x62\x0f\x39\xb0\xcd\x23\xab\x8f\xf7\x87\xda\x2d\xec\xcb\x31\xfb\xe5\xf3\xab\xf7\x4b\x9e\xec\x07\xba\x45\xb4\xf2\xff\x69\x77\x41\x67\x26\x6d\x2b\x40\x06\xca\xb1\x24\xe5\x5e\xce\x77\xbb\x43\x1e\xdc\x47\x4c\xee\x8b\xaa\x13\xdf\xbc\x66\xd9\x86\x4d\x39\x7d\x9e\x2a\xd7\xf8\x7a\xb8\x67\x69\xcc\x72\x96\x9d\x84\xa5\x95\x56\x88\xa2\x08\x1f\xe1\x9b\x7c\x2d\x2b\xf6\xb5\xba\xf8\x3e\xf3\x9f\x40\x58\x9d\xc9\xab\x58\x00\x6c\x33\xcb\xcb\xa7\x4c\x80\x0b\x6b\xf5\xa3\x7d\x52\x19\xfe\x3b\x7c\x88\x92\xc3\xa6\xfc\xbf\xbb\x20\x0e\x1e\x59\xfa\xb6\xdb\xd8\x22\xe8\xf5\x77\xd1\x8d\xca\xbf\xef\x83\x94\xc5\x79\xeb\xfa\x69\xf2\x2c\xbf\x49\xdd\x8e\xea\x2c\xa1
\xda\xba\x93\x65\x55\xdd\x4f\x5a\x5f\x3f\xdd\x6f\x3b\xdd\xba\x4e\x62\xa1\xba\x14\x66\x85\x64\x54\x91\xab\x4e\x9c\x9e\xe2\x7b\xf9\x7d\xb0\x66\x1b\xde\xe4\xac\x54\x80\xe0\x53\xdf\x23\x6b\x1d\x60\x15\xe7\xfc\xd3\x34\x88\x1f\xd9\x46\x09\x91\x9d\x76\x5a\xba\x4e\xe2\x87\x28\x5c\xe7\xad\x5e\xe7\x43\xbf\xd3\xde\x13\xd8\x35\xbe\xe1\xca\x7f\x58\x27\xd1\x61\x17\x77\xfe\x43\xdf\x4a\x8c\xdf\x41\x7b\x62\xe9\x8f\x25\x76\xbb\xbd\xfd\x46\x77\x7a\x52\x74\x51\xf2\x50\xf5\x68\xed\x64\x69\xcf\x73\x46\x81\x3e\x93\xd0\x5e\x4f\x30\x6f\x70\x35\x3e\x3a\x5a\x34\xf7\x3a\xb6\xbe\x7d\x66\xe1\xe3\x36\x3f\xde\x57\x5a\x5c\xce\x6c\xe2\xae\x26\x5c\xd9\x46\x39\x77\x84\x19\x7f\x5a\xa7\xe5\x63\x2b\xff\x5b\xef\x96\xa1\x74\x9a\x05\x9b\xff\xef\x90\xc9\xb3\xc1\xdb\xca\x9c\x7c\xee\xd2\x85\x98\xad\x53\xc6\x62\xf8\x53\x63\x9a\xef\x31\xb9\x67\x2c\xcd\xfe\x7c\x0a\x3f\xa7\x2c\x28\xa5\xfa\x64\x33\x66\xf0\xd8\xf8\xb7\x6d\xa3\xd9\xb2\xb1\xbd\x7e\xba\xba\x01\xe2\x10\x43\xb2\x0b\xf3\x5c\x08\x4e\xd4\x8e\xd3\x1f\x2a\x89\xb8\xe7\x2d\xff\x68\xa5\x2c\xde\x30\x21\xe2\xa6\x5f\x7a\xf1\xd7\x7c\xe8\xb1\xf6\x87\xe1\xbb\x0b\xab\xce\x94\xd1\xfb\xcc\x4a\xdf\x64\xd1\x00\xfd\xbc\x22\xbd\xb6\xbd\x8f\x8c\x3f\xdf\x81\x86\x19\x45\xd9\xcd\xa3\xeb\x83\x51\x75\x83\xcd\xb1\xd1\x4b\x39\xd4\x87\x66\x3b\xd0\xfb\x28\x88\xbf\x0e\x2d\xf5\x0d\xf7\xf1\xad\x87\xfb\x9e\x9b\x35\xf9\xb9\xc1\xdd\x8d\xd9\x6a\x05\x47\xc4\x1d\xc9\x8f\xd4\xa0\x1f\xb5\xff\xc3\xd3\x2e\x27\xf0\x4d\x6e\x0f\x07\x7f\x93\xad\x93\x94\xad\x83\x74\xe8\xf8\xce\x09\xe4\xec\xdb\xb0\x99\x28\x79\xcc\xae\x83\x78\x60\xb7\x5e\xfd\xc2\xfd\x63\xbb\x18\xbd\x38\xee\xd1\x99\xdb\xfb\xfe\x1e\xc4\xfd\xc8\xb0\x33\xf5\xef\x98\x25\x7a\x41\x78\x44\x28\x89\x5c\x5a\xef\x88\x6c\x93\x3c\x80\xb6\xde\x26\x49\xc6\xb2\x42\x0c\x3b\x4a\x1e\x81\xc5\xb9\x91\x0c\x93\xe0\x84\xf3\x43\x1a\x9f\xc2\x2d\x63\xf0\xdb\x7c\xf3\x14\xc4\x7c\x69\xc8\x1f\x3b\xfc\xbf\x07\xa9\xfe\xd9\x7f\xf0\x2c\x4a\x1e\x1f\xc3\xf8\xf1\xed\x26\x59\x67\x6f\x9f\x42\xf6\xfc\x36\x28\x2c\x9c\xfc\xb7\xac\xfc\xe7\xf1\x90\xe6\x32\x8e\x5e\x9a\xad\x96\x37\xb5
\x0b\xc4\xc9\xc7\x4a\x51\x5e\x2c\x46\xcd\xcf\xd1\xcf\x63\x10\x5f\x82\xb2\xb6\x30\x27\x54\x23\xa3\xe6\xb5\x86\x0d\x95\x9b\x04\xbe\xf9\x32\x3c\x5f\x65\xa2\xe6\xd0\x5e\x87\x36\xaf\x82\x19\x05\xab\xad\xdc\xe3\x89\x15\x6a\xf1\x28\xea\x5c\x36\x52\x4f\x30\x8a\x4c\x64\x9a\xf9\x84\xc4\x97\x6c\xa7\xf0\x41\x1e\x62\x10\x1c\x7c\xf4\x52\xa7\xc2\xe1\xbd\x9f\x1d\xf6\xfb\x24\xcd\xd9\x86\x2f\x45\xc6\x3d\x50\xe5\x59\xcb\x72\x6f\x21\xf5\x5b\x0b\x87\x5a\xb9\x57\xde\x26\x59\x5e\x5e\x65\xec\xa9\x92\x30\x5a\x0a\x36\x6b\x88\xc9\xd2\x7c\xa0\x84\x5a\xfa\x3d\xad\xbe\x90\x76\xba\xfa\xd2\xb7\xff\x6d\x96\x6a\xaa\x76\xff\x29\xbb\x35\xf8\x0a\x60\x3e\x65\x26\xf6\x5a\xb3\x6c\x75\x77\x90\x1d\x76\xbb\x20\x0d\xff\x25\x60\xd5\x96\x48\xdf\x51\x17\x29\xd6\x5f\x3d\x93\x05\x0a\x54\x02\xbe\xc3\x0d\x6e\x1d\xbd\x77\xf0\xad\x7d\x0c\x0e\x8f\xec\x9f\x21\x7b\x9e\xe2\x38\x6b\xa1\x96\x53\x5e\xc2\xb4\x1a\x66\x6c\x99\x1a\x57\x06\xda\x2f\x7c\x62\x94\x24\xb0\xdc\xef\x95\xa3\x2e\x4f\xc4\xb9\x0b\x08\x64\xbf\x8c\xbe\xb3\x06\x07\x92\x71\xc7\x6f\xa3\xe4\x99\xa5\xef\x93\x43\x6c\x24\xf7\x7e\xe4\x33\x47\x17\x55\xe3\x0c\xea\x76\x23\x63\xbc\x36\xdc\xf3\xea\xf5\x61\x5a\xd1\xcd\x66\xdd\x28\x4b\xe7\x08\x53\x71\x78\x46\x9e\x47\x0a\xa2\xe7\xe0\x25\x83\x7b\x56\x6e\xff\x8d\x6c\x0a\x45\xd1\x24\xad\x61\x47\xde\xb4\xf1\xe6\x1c\xf6\xfb\xef\xf7\x59\x7d\xaa\x1a\x47\x79\x56\xe2\xd6\x8e\xf6\xac\x22\x26\x72\x1a\x19\xea\xa9\xe3\x1e\x54\xb6\x0f\xd2\xaf\x62\x96\x9f\x76\x92\xbb\x55\xae\x33\xc5\x4c\x87\xba\x02\x76\xba\x13\xdd\xe4\x66\xba\x33\xf9\x92\x81\xdc\x76\x56\x77\xb4\x7a\xd9\x8f\x8d\x71\xcc\x2c\xba\x0b\xe3\x39\x3e\x79\x86\xdd\xc1\xd4\xcb\xce\x35\x6d\x27\xc9\x24\x56\xd4\x1d\x78\xbf\xb2\x78\x6d\x26\x5d\x57\x9c\xc6\x94\x2f\x5c\xb8\xdb\x47\x8c\xb7\x8b\x6d\xe0\xfe\xa5\xc0\xad\x2b\x51\xb4\x5d\x18\x87\x3b\x43\x85\x92\x5a\x80\x41\x1e\x2b\x2c\x33\x01\x8a\xa3\xd3\xcd\x65\x1a\x22\x8d\xe7\x4f\xfc\x85\xfe\x16\xf0\x36\xce\x20\x7c\x68\x64\xdb\xc8\x60\x7f\xb8\x8f\xc2\x6c\xcb\x78\x67\xac\x19\xb0\xa7\xe1\x45\x4f\x5d\x7e\x78\xc7\xef\xeb\x90\xb3\x0c\xc2\x1c\x9e\xc5
\x84\x13\x27\x7c\x97\xfb\x95\xb7\x31\xce\x58\x4d\xfb\x05\x46\x73\x64\x71\xf3\xb2\x69\x41\x2e\x64\xcb\xe4\x25\xaa\xe4\x23\x99\xcc\xb3\x81\xc8\xe2\x10\x66\x90\x88\xa7\x1f\x44\x22\x30\xc4\xbe\x85\x59\x9e\xc9\xad\x60\x90\x41\x00\xdb\xd0\x44\x3c\x45\x7d\x91\xa6\x1f\xeb\xb7\x98\x17\x17\xdc\x4c\x78\xea\x35\x49\x18\x81\x9a\x7e\x97\x37\x85\x7f\x98\x44\xe7\xc9\x17\xc5\x50\x70\x50\xcc\x99\x85\xe7\x5c\xbe\x62\xbc\x81\xba\x83\x27\xd7\xf3\x9b\x7f\x7c\xf9\xf0\x71\x7e\xb3\xfa\xb2\xfa\x7c\x8d\x3e\x83\x02\x85\x85\x8b\xf3\xab\xc5\xac\xf8\xfb\xfd\xfc\xc6\x2d\x42\x30\xd4\x48\x84\x01\xde\x46\xc4\xcf\xdf\x8f\x1c\x68\x31\x25\x19\x2d\xfc\x40\xc6\xfc\x5c\xf7\xd0\x52\xdd\xba\xfa\x2c\x78\x25\x86\x99\x97\xe4\xd8\x68\x77\x14\xcb\xa2\xfa\x83\xfc\x18\x3e\x75\xe5\xdf\xde\xbc\xa9\x50\x17\x31\xb0\xc6\xe7\x83\x9f\x64\x16\x98\x60\x9d\xf3\x75\x92\x5c\x82\x7d\x9b\xb5\xae\x15\x4a\xc9\x23\xd8\x04\xb1\x49\xae\x4d\xc9\xc2\x85\x0f\xf0\x8d\xd7\xac\x96\x6c\xca\x72\xac\x34\x56\xf7\xcf\xb8\x4f\x67\x1b\xe4\x90\xa7\xe1\xe3\x23\x4b\xf9\x62\x30\x4a\x9e\x67\xdc\x66\x1d\xbe\x6b\x5f\x63\xd4\x64\xbb\x0d\xad\x6b\x04\xf7\xc9\x13\x3b\x85\x5b\x79\x56\x3e\x7a\xe1\x1f\x9d\x71\xbf\x90\xd0\x93\xe4\x35\xdf\x8a\x36\x42\x00\xcf\x41\x1a\xcb\x6f\xa8\xf6\x3a\xa3\x26\x1b\xd6\x66\xb5\xec\x5f\xf7\xf9\x94\xd7\x31\x1b\x52\x27\x70\x88\xc5\xc3\xf9\x26\xa5\xe8\xf7\x87\x5c\x7c\x02\x1b\x8f\x5a\x5a\x3a\x85\x3f\x9d\x99\x3d\xf9\x3c\x3d\xec\xf6\x59\xd9\x8e\xd3\x3f\x03\xcc\x33\xa1\xb6\x52\x7e\xb4\x45\xde\xa9\x0d\x4b\xf9\x3a\xd2\xc0\xe7\x23\x8b\x38\xef\x2f\xdc\xcd\x49\x14\x25\xcf\x42\xa4\x34\x39\xa4\xcd\x57\xeb\x7f\x2a\xfe\x06\xfe\xf3\xdd\x4c\x28\x14\xe5\xec\x31\x49\x5f\xc6\x27\xb6\x37\x6f\xce\xe6\x57\x3f\x2f\x6e\xde\xbc\x99\x89\x3b\x10\xcf\xe4\xef\xf0\xe6\x8d\xc0\x9e\xf8\xbf\xfe\xef\xec\xef\xcd\x0b\xfc\x07\xfa\x02\xbf\xcc\x6f\xae\xce\xaf\x7e\x1e\xba\x42\xf3\x02\x3f\xb8\xba\x03\x49\x91\x75\xec\xff\xbb\xb3\x1b\xa8\x2f\x20\xb6\x70\xf1\xdf\xcd\x28\x14\xe8\x99\x14\x7e\x78\x57\xac\xbf\x84\xea\xa4\x3a\xc4\xc7\x77\x7c\x01\xc8\x6e\x90\x83\x76\x56\xa6\x97\x51
\xa6\x87\x1f\xde\xc1\xfd\x21\x47\x6c\x23\x9b\xcd\xfb\xf7\x77\x10\x40\xd1\x15\xad\x8b\xf0\xe5\x91\x90\xa8\xbd\x67\xf9\x33\x63\xe3\x76\xb9\xad\x78\x03\xff\xc1\xff\x07\x96\xff\x18\x6a\x73\xd9\x04\x93\x90\xd1\x7f\xb4\xee\x0f\xfe\x53\xd3\x66\x7e\x61\xc3\xe7\xd4\x3b\xb7\x0a\xbb\xcd\xfe\x3e\x1d\x5b\x6a\x7c\x9f\x21\x02\xc3\x64\x99\xa4\x55\xa9\xf1\xaa\x01\x9b\xf8\xd1\x98\xbc\x27\x48\x87\x10\xe9\x7b\xeb\x2e\xc2\xe0\xf7\x53\x01\xf8\x28\x04\xdf\xd0\x62\xb9\xc0\x37\x82\xf0\x0d\x6d\x16\xa8\x3e\x16\xc3\xc7\x88\x96\xd1\x50\x7c\x04\x8c\x8f\xc0\xf1\x31\x40\x3e\x0a\xc9\x47\x41\xf9\xa6\x58\x3e\x12\xcc\xb7\x7f\x45\x31\x70\xbe\xf5\x6b\x8a\xa5\xf3\x27\xe5\xf3\xa7\x21\xf4\x9d\x33\xfa\xb8\xf7\x8e\xca\xe9\x63\x48\x7d\x73\x56\x1f\x41\xeb\xdb\x8f\x64\x53\x62\xdf\x1d\xb3\x8f\xa0\xf6\xd1\x71\x19\x42\x64\x86\xcc\xee\x4f\x46\xef\xe3\xf8\x7d\x43\x93\xe3\x94\x7f\x47\x3a\x6e\xba\x70\xcc\xca\x38\x50\x0e\xe4\xe9\x13\x75\x09\xe5\x49\xd6\x8e\x48\x99\x79\x41\x0c\x72\xe1\x32\x6f\x1d\x77\x30\xf0\x75\xe4\x41\xc1\xff\x6f\x19\xdc\xe6\xc1\xfa\xeb\x26\x0d\x9f\x58\x5a\xb2\xfb\x30\xbf\x3e\x77\x19\xa0\xce\x5b\x2a\xe8\xe6\xce\x66\xe3\x0c\xf6\xbd\x8f\xd2\xe8\xdc\x1e\x38\x7a\x9e\x84\xcb\x2a\xcf\x58\xd6\x82\x7d\x90\x06\x3b\x96\xb3\x34\x23\xcb\x0b\x9a\x06\xd4\x40\x7c\x01\x1e\x4c\x5b\x8b\x4d\x4b\xdf\x48\x9e\x83\x93\x38\x34\x7e\xf0\xa0\x49\x3d\x8f\xce\xd8\x33\xc5\xb3\xc7\xb7\x42\x19\x0a\x4d\xac\x55\x00\x08\xc1\x33\x29\xe3\x70\x19\x15\xab\x33\x41\x7e\x12\x21\xdd\x2a\x63\x46\x9d\xd1\xd8\xd8\xe6\xee\x10\xe5\xe1\xbe\xa5\xe2\xcf\xa7\xa3\x3a\x23\xcd\x53\xc8\x04\x94\x63\x6c\xb3\x4a\xe5\x6e\x58\x03\x3b\x12\xa1\x19\x89\x34\x8f\xef\x56\xcd\xa3\xa5\xa7\x68\x8f\x4c\x74\xb0\xb7\x2c\x9d\x8f\xba\x46\x1a\xbb\x94\xc4\xc6\x29\xcf\x17\x31\xd8\x32\x1e\x39\xe3\x1f\xf5\x22\xfd\xfd\x4c\x7a\xa7\x0b\x72\x05\x65\x55\x30\x58\x4f\xe1\x86\x35\x42\xb3\x71\x95\x42\xe1\xb7\xc6\xc0\xf9\xaf\xdf\x8a\xb3\xca\xb5\x40\xe6\xe9\xd3\x8f\xa7\xf5\xeb\xf4\x5f\xa8\x0b\x0b\x86\x4e\x38\x97\x33\x01\x83\xdd\x47\xc9\xfa\x6b\x56\xe6\x6f\x2a\x82\xad\x28\x45\x51\x90\x69\x7a
\x37\x49\xcc\xe0\x9e\x3d\x24\x32\x3b\xb7\x26\xcb\x3d\xca\xa2\x9a\x85\xba\x3c\x69\x2b\xa5\x46\xd1\x2b\x33\x59\x2a\x51\xfb\x1c\x22\x16\x64\x39\xfc\xed\x5d\xf9\x24\x05\x97\x14\x34\xda\x8c\x1b\x22\x45\xe2\x7d\x21\x9c\x2e\x3d\x80\x8e\x72\xf0\xab\x99\x7b\xca\x8f\x16\xc8\x50\x54\xa9\xcc\x8e\xb2\x58\xcf\x73\x42\x4e\x56\xf3\x98\xb0\x16\x1b\x19\x84\x8b\x6d\x45\x69\x88\xac\x6f\xaf\x49\x7a\x6c\x99\xc3\x68\xdd\xc9\x9e\x7f\xfc\xd9\xad\x9b\xc1\x9f\x62\x6b\x9a\x4f\x70\xb7\x6d\xc8\x0c\x3b\x5d\xe7\x45\x95\xdd\xa1\x4e\x2c\x62\x7a\x18\xa0\x2c\xf7\xac\x0a\x05\x17\xdf\xe0\x6e\x62\x1c\x5c\x8a\x9d\x6e\x7a\x9d\x19\x3c\x6f\x59\x31\x67\x95\x3b\x41\x94\x49\x91\xee\xa5\x71\xa8\xa8\xd8\xf4\xd5\xa9\x65\x08\x33\x8a\x48\x03\xf8\x70\x88\xd7\x25\xcd\x5f\x7e\x19\x82\x28\x65\xc1\xe6\xa5\x4c\xcc\x81\x32\x5a\x84\x4c\xc2\x76\xa6\xf7\x3a\x6f\xc8\xd5\xa8\x2e\x98\x5a\x78\xab\x8a\xc4\x1b\xcd\xec\x31\xea\xdc\x8d\xfd\x2e\x3a\xce\x1c\x33\x96\x35\x46\xfc\x2b\x35\x73\xcc\x60\xd6\x98\x9b\xf2\x05\x40\x7f\xf8\x5e\xc4\x3c\xa7\x9e\xfe\x12\xb1\xdd\xa0\x5e\x42\xa2\x8c\x76\xf2\xce\x68\xb3\xff\x23\x17\x12\xdd\xf1\xa3\xcd\xfc\x8f\xb2\xfa\x10\xa6\x6a\xe6\x19\x47\x59\xff\xdd\x67\xfc\x37\xc9\xf6\x6f\xf1\x29\x1d\x4e\x44\x83\x9d\x45\xfb\xb3\xfc\xab\x59\xfb\x51\x76\xb5\x19\xfe\xfb\x32\xf6\xe3\x5e\xab\x91\x0c\x34\x58\x37\xb5\x2c\xa5\xb3\xda\x7d\xa6\xfe\x09\xb2\xf4\x4f\x9e\xa1\x7f\xaa\xec\xfc\x53\x65\xe6\x9f\x34\x2b\xbf\x8b\x8c\xfc\xd8\x3c\x47\xd4\x4c\xfc\xd4\x2c\xfc\xc4\x0c\xfc\xc4\xec\xfb\xc4\xcc\xfb\xf4\xac\xfb\xe4\x8c\xfb\x76\xd9\xf6\x2d\x33\xed\x5b\x67\xd9\xb7\xce\xb0\x6f\x9d\x5d\xdf\x3a\xb3\xbe\x75\x56\x7d\x37\x19\xf5\x1d\x64\xd3\x27\x67\xd2\xa7\x65\xd1\x77\x94\x41\x7f\xa2\xec\xf9\xd3\x64\xce\x37\xc8\x9a\x8f\x5e\xca\x8e\x65\xcc\x2f\x33\xe0\xa3\x8c\x8e\x66\xcb\x6f\x64\xbf\x47\x19\xee\xcd\x94\xdf\xcc\x7c\x8f\xb2\xd8\x93\x25\x5f\x9b\xf5\x1e\xb9\x9a\xaf\x33\xe4\xf7\x67\xbc\x47\xba\x63\x65\x76\xfc\x89\xb3\xdd\x8f\x66\xba\x27\xa5\xf9\x1e\xca\x72\xdf\xc9\x5a\x8f\xdc\x8d\xb4\x33\xdc\xeb\x33\xd6\xe3\xb6\xa2\x63\xd9\xed\xf9\xad\xd0\x36
\x38\xda\xcc\xf6\x36\x39\xd9\x07\xb2\xda\x77\xb2\xd4\xe3\x0c\xb7\x32\xda\x6b\x33\xd4\xd3\x9f\xd5\xac\x37\x3b\x3d\xae\x95\x75\x7e\x7f\xb7\x99\xe9\xa7\xc9\x4a\x6f\x9d\x91\xde\x3a\x1b\xbd\x6d\x26\x7a\x42\x16\x7a\x72\x0a\x77\xbb\xec\xf3\x36\x99\xe7\xdd\x38\xbe\x2d\x32\xce\x4f\xec\xf6\xb6\x4c\x2b\x6f\x90\x52\x9e\x2f\x48\xb0\x1e\xd0\xfe\x74\xf2\x4d\xff\x35\x6e\xe2\x1d\x4e\x25\x5f\xa4\x86\xc7\x7d\xdc\x47\xd2\xc8\x4b\x3a\x1a\xed\x4f\x9e\x24\x85\xfc\x04\xe9\xe3\x4d\x52\xc7\x8b\x85\x03\xce\x51\xeb\x3c\x6d\xfc\x78\xca\xf8\xfa\x99\xe1\x9c\x74\x7d\xe9\xe2\x9b\x4e\x7c\xc2\x20\xd5\x38\xfc\x55\x27\xbe\x85\xe7\xbb\x9d\x26\x1e\x77\x1a\xaf\xb8\xef\xe1\x14\xf1\x8d\x94\xef\xc8\x4f\xa2\x3e\x3d\xbc\x75\x70\x62\xd8\x45\x4f\x8b\xa0\xb7\xd2\xc2\xeb\xbd\xea\x94\x09\xca\xc4\xab\x6e\x1b\x4a\xd6\x79\xd5\x49\xdd\x30\x45\x6a\xf7\x69\xd2\xba\x3b\x4c\xe9\x8e\x77\x73\x92\x52\xb9\x53\xd3\xb8\x13\x53\xb8\xdb\xa5\x6f\xb7\x4a\xdd\x4e\x4b\xdb\x4e\x4b\xd9\x4e\x4c\xd7\x4e\x4d\xd5\x4e\x4b\xd3\x4e\x4e\xd1\x6e\x95\x9e\xdd\x2e\x35\xbb\x6d\x5a\x76\xdb\x94\xec\xb6\xe9\xd8\x6d\x53\xb1\xdb\xa6\x61\x77\x91\x82\xdd\x22\xfd\xba\x99\x3a\x6a\x59\xec\xb3\x8c\x1b\x1f\x7f\x86\xfe\x63\xee\x7c\x95\xf4\x5b\x7d\x28\xb0\xb8\x07\xf3\x0c\xdb\x4f\x3f\xbe\x95\x55\x10\x29\xb4\xe5\x79\xc7\x0d\x8b\x73\x71\x90\xb2\x91\x89\xb0\x74\x3c\x55\xd2\x9d\xe6\xab\x69\xfe\xc9\xab\x34\x3a\xf3\x04\xa3\x73\xb3\x0f\xd7\x5f\xdb\xdb\xbb\xe3\x1d\xa4\xbe\xd6\x5c\x1d\x6b\xc9\xe1\xbe\xd6\xa2\x39\xea\x10\x0b\xe2\xaf\x7c\x40\xdd\x07\xe2\xec\x55\xf3\x0c\xb3\xb0\x39\xe5\x11\x64\x24\x11\x28\x8b\x1b\x2f\x05\x8e\x0e\x6c\x99\x98\xf8\x31\xe2\x61\x42\x59\xd4\x53\x79\x1f\x93\xe7\x52\x5c\x4a\x1e\x99\x97\xcf\x19\x79\x28\x2f\x63\x11\x5b\xe7\x9d\x88\x03\xec\x03\x81\xd2\xa7\xc9\xe1\x71\x8b\xde\x54\x15\x23\xab\x2b\xf7\xa3\x27\x0b\x71\x21\xa3\xe5\xf5\x0c\xde\x2f\x57\xab\xe5\xe5\xb4\xf1\x7c\x3a\x90\x58\x5a\x58\x2d\xaf\x51\xbf\x97\x77\x65\x5c\x25\x3e\xec\xea\x91\x85\x7f\xc3\x86\x72\x65\xe8\xcb\x78\x06\x0d\x7d\x69\x27\x0e\x6a\xb6\x9b\x3a\xfc\xf9\xe8\xdf\x05\xf1\x8b\x3a\x72\x13
\x09\xc2\x62\xbd\x49\xed\xc1\x8e\x9c\x1a\xa1\x7c\xf9\x2e\x59\xbe\x7d\x0d\xd2\xe2\xa6\x79\xf9\xef\x70\xc2\x53\xda\x67\x35\xe9\xdd\x15\x3d\xfd\x65\x27\x6c\xdd\x35\x42\x7b\xd8\x87\x2e\xc2\x80\xea\x01\x64\xe9\x98\x92\x62\xe3\x22\x50\x9e\x6c\x0e\x6b\xbc\xe3\x47\xc6\x5c\xa5\xf6\xbd\x50\x18\x57\x4f\x3e\xef\xb1\xe1\xa0\x76\x38\x29\x4f\x4a\x38\x81\x1a\x07\xea\x4c\xcc\x97\x8b\xd5\xc7\xe5\x99\xca\x7b\x17\xff\x26\x0e\xd3\xa1\xac\x97\x15\xe7\xbf\xd6\x46\xce\xaf\xaa\xbf\xc5\x61\xba\xe2\x6f\xbe\x35\xbf\x5d\x4d\x3b\x91\x77\xef\x8d\x52\x1d\xbd\xc9\xae\x7b\x81\x54\x0d\xe9\x3f\xa8\xfb\x96\x52\x4d\x3e\x06\xa3\x9a\x92\xb1\x09\xd2\x97\xf9\x6b\xd0\x8e\xb7\x9a\xab\xbf\xe2\x22\xdd\xa2\x39\x6a\xb4\x69\xbf\x8f\x5e\x20\x28\x3a\x57\x39\x57\x12\x3c\x98\x2f\xfc\x01\xee\x1a\x55\x9b\x93\xa3\x07\x0d\xb5\xc5\x83\x86\x86\xc5\x83\x86\x1e\x34\xf4\xa0\xa1\x07\x0d\xfb\x3e\x7a\x9e\x38\xf4\xc4\xa1\x27\x0e\x3d\x71\x68\x5e\x3c\x71\xe8\x89\x43\x4f\x1c\xca\xe2\x89\xc3\x8e\x5d\x4f\x1c\x8e\x15\x4f\x1c\x0e\x56\xf3\xc4\x21\xce\x80\x27\x0e\x3d\x71\xe8\x89\x43\x4f\x1c\x7a\xe2\xd0\x13\x87\x9e\x38\xf4\xc4\xa1\x27\x0e\x0d\x0c\x78\xe2\x70\x92\x26\x7f\xdf\xc4\xa1\xce\xff\xed\xd1\x43\x8f\x1e\x7a\xf4\xd0\xa3\x87\xa8\xfb\xf6\xe8\xa1\x47\x0f\x3d\x7a\x68\x5a\x3c\x7a\x68\x58\xd9\xa3\x87\xed\x5a\x1e\x3d\xb4\xaa\xef\xd1\x43\x23\xf4\xb0\x9d\xf8\xe5\x86\x2f\x6a\x8e\x9f\xfd\x45\x5c\x16\x63\xc6\xe1\xd6\xc8\xf4\xda\xca\x7e\xe7\x5a\x93\x00\x26\x00\xb1\x24\x34\xce\x2b\x2a\xba\xf1\x39\xd1\xf0\x6d\x46\xfe\x0a\xec\xb1\xd7\x0d\x8b\x93\x5d\x18\x07\xb9\x69\x3e\x4a\x17\x67\x9f\xcf\xea\x8b\x62\x0d\xb8\x7e\xc4\xf8\xa6\x74\x82\x04\x8d\x2e\x2c\x17\x98\xe2\x91\x9b\x2e\x21\x30\x39\x7f\x00\x97\xf7\x07\xa8\x07\xa1\xf1\x27\xe2\x81\x36\x32\xc0\x45\x1e\x20\x38\xd2\x08\xa1\xb5\x6c\x92\xfc\x40\x30\x51\x8e\x20\x40\xe4\x09\xa2\x6c\xbd\x30\xb9\x82\x80\x38\x7a\xc1\xee\x28\x3f\xd8\x78\x28\x1d\x1e\xe9\x07\xb3\x63\xfd\x48\x8b\x75\xe8\x21\x1b\x3c\xda\x8f\x36\xdb\x44\x01\xf4\xc7\xfb\xd1\x26\x8f\x80\x03\x80\x21\x12\x80\x36\x5a\xe2\x72\xfd\x58\x00\xa1\x3b\xe4\xb1\xf2
\x31\x34\x00\x6d\x58\x06\x3a\x87\xf0\x00\xb4\xc9\x16\x4e\xa0\x41\x04\xd0\x26\xfb\x91\x82\xc6\x9c\x87\x7f\xf8\xbd\x58\x01\xc9\x17\x20\xcb\x00\x5a\x40\x25\x21\x40\xe6\xb2\xef\xc3\x0b\x2c\x2d\xbb\x40\x0c\xc0\x1a\x33\x00\xa7\x33\xaf\x15\x6e\x00\xc7\x5f\x52\x58\x93\x07\x60\x4a\x1f\xe0\x87\x47\x32\x42\x20\xd0\xe6\xdc\x60\x98\x42\x20\xce\x63\xc9\xc3\x28\x89\x80\xff\x66\x96\x47\x34\x86\x69\x04\xe2\x94\xbe\x99\x80\x48\x80\x49\xa8\x04\x98\x84\x4c\x00\x33\x3a\x01\xff\xc5\xe8\xc6\x35\xd4\x58\x05\x71\x1c\x34\x88\x86\x0e\xa5\x40\x1d\x5a\x6d\xaa\x41\xed\x02\xca\xa7\x42\x4b\x36\x34\xc6\x16\xf9\xe6\xf5\x74\x83\x24\x16\xd0\x46\xdd\x13\x0e\x30\x09\xe5\x00\x86\xa4\x03\x65\x01\xe2\x3c\x1c\x03\xba\x90\x4c\x87\x78\xb0\x5a\x81\x9d\xea\xa9\x07\x5a\x33\xbb\x94\x44\x23\x3e\x83\x9f\xf6\x06\x49\x89\x92\x7e\x40\x9b\x75\x4f\x4b\xc0\x34\xc4\x04\x1c\x83\x9a\x80\x09\xc9\x09\x98\x90\x9e\xd0\xdb\x76\x48\x50\x80\x23\x8a\x02\x08\x91\x45\xb0\xa0\x29\xc0\x82\xa8\x00\x3a\x55\x01\x74\xb2\x02\xe8\x74\x05\x58\x11\x16\x60\x43\x59\x74\x2b\x63\x23\x63\x1a\x0b\xd8\xe0\x1c\xb8\x20\x2e\xc0\x05\x75\xd1\x6b\x04\x13\x2b\xeb\x33\x82\x0a\xd8\xf5\x19\x41\x45\xed\xc0\x19\x85\x01\x6e\x48\x0c\xb0\xa1\x31\x80\x4c\x64\x80\x3b\x2a\x03\xa6\x23\x33\x60\x32\x3a\x03\xcc\x08\x0d\x8a\xa3\x66\x94\xd2\xc0\x2f\xec\x25\xd5\x31\x4e\x6a\xe0\xb7\x21\x15\xd9\xd1\x4f\x6b\x50\x77\x4c\xc2\xe1\xdc\x43\x6c\xa0\x6d\xea\x08\x0f\x85\xda\x20\x6c\x41\x7a\x28\x8f\x8a\xdc\xc0\xf7\xe6\xa4\xa4\x07\x98\xd0\x1e\xe4\x4d\xe3\x10\xf1\x81\x5f\xe1\xb7\x08\x11\x0d\xf5\x41\x78\xb3\x34\x94\xc8\x38\xf9\x81\xbe\x8e\x02\x60\x68\xe9\x0f\x7c\x77\x28\xb4\xc8\x10\x01\x42\x19\x10\x6a\x83\xdb\x14\x08\xf5\xed\x55\x5f\x5c\x95\x04\xa1\xba\xaf\xbb\x53\x23\x91\xa1\x81\x8e\x87\xd2\x96\x1e\x81\xc9\x08\x12\x70\x41\x91\x80\x0b\x92\x04\x1c\xd0\x24\x40\x23\x4a\xc0\x2e\x72\x60\x4b\x96\x80\x25\x5d\x02\x4e\x03\x1f\x16\x94\x09\x1c\x3f\xec\x61\x09\x9c\x80\x19\x74\x82\x1e\x10\x5b\x29\x4a\xdd\x0f\x9e\xe0\x27\xbe\x4e\xac\xa3\x17\x3e\x41\xdb\x96\xb0\xca\x28\x80\x82\xb6\x2b\x80\x95\xe9\x20\x14
\x98\x06\x44\x01\x43\x18\x85\xb6\x4a\x74\x0f\xa4\x80\x11\x94\x42\x0b\x2e\x09\x00\xa3\x17\x4c\xf9\x9d\x04\x7f\xc8\xc0\x07\x8c\x49\x53\xcd\x49\xa0\x0f\x14\xf2\x54\x12\x6a\xe9\x05\x55\x9c\x04\xac\xba\xb0\x0a\x75\xc9\x55\x87\x6a\x34\xc0\x0a\x65\x24\xf4\x46\x56\x1a\x91\x12\xb7\xab\x24\xdb\x23\x0e\xbd\x3a\x52\x94\x70\xc8\x14\xb0\x0b\x4c\x06\xbc\x80\x5b\xe8\x05\xc8\xee\x69\x12\xfc\x02\x16\x00\x0c\xd0\x21\x18\xb0\x06\x61\xc0\x16\x86\x01\x32\x10\x03\x64\x28\x06\xe8\x60\x0c\x58\xc0\x31\x40\x06\x64\xc0\x06\x92\x01\x5b\x50\xa6\x6b\x80\xe6\x8f\xb7\x03\x66\xc0\x01\x34\xd3\x67\x03\xef\x02\xb7\x83\x67\x7a\x6c\x10\x5c\xf1\xf6\x10\x8d\xa6\x2d\x98\x1c\x5e\x80\xce\xe3\x05\xce\xf4\x06\x50\xc9\x96\xe0\xfb\xca\xe9\x05\x53\xe5\xf5\x02\xbb\xdc\x5e\xf1\x61\xc7\x17\xd3\xc7\xe4\x65\xae\xca\x4b\x62\xab\xbb\xde\xc1\x63\x1b\xd2\x09\xdb\x54\x9d\xe7\x49\x99\xff\x7b\xa4\x4c\x35\x3a\x3c\x27\xe3\x39\x99\xd1\xe2\x39\x19\xcf\xc9\x78\x4e\xc6\x73\x32\x9e\x93\xf1\x9c\x0c\x62\x41\xe1\x29\x19\x4f\xc9\x78\x4a\xc6\x53\x32\x9e\x92\xf1\x94\x8c\xa7\x64\x3c\x25\xe3\x29\x19\x4f\xc9\x78\x4a\xc6\xb4\xaa\xa7\x64\x3c\x25\xe3\x29\x19\x7d\xf1\x94\xcc\x40\xf1\x94\x8c\xa7\x64\x3c\x25\x53\x36\xda\x53\x32\x9e\x92\x69\x16\x4f\xc9\x78\x4a\x06\x6f\xc4\x53\x32\x9e\x92\x31\x0b\x7a\x78\x46\xc6\x33\x32\x9e\x91\xf1\x8c\x8c\x67\x64\x3c\x23\x03\x9e\x91\xf1\x8c\xcc\x68\x55\xcf\xc8\x10\x6a\x7a\x46\xc6\xa8\xb2\x67\x64\x3c\x23\x33\xde\x16\xcf\xc8\xfc\x71\x19\x99\x7d\xb8\xfe\xda\xde\xdc\x1e\x0f\x97\xb9\xd6\x5c\x1d\x6b\xc9\xf5\xae\xde\xa2\x4d\xea\x78\x0b\xe2\xaf\x7c\x74\xdd\x07\xe2\x98\x61\x27\x49\x90\xe9\xf2\x84\xc2\x0b\x6c\xc2\x94\xad\x69\xa4\x8b\xed\x4b\x7a\x56\x5e\x9a\x62\xe2\x18\xcf\xb2\x6a\xa0\x4d\xbe\xdc\x37\x1f\xa5\x43\xe5\x90\xb1\x02\x7a\x91\x0f\x1b\x19\xac\xca\x58\xc4\xd6\x79\x27\x13\x3e\xec\x83\x8c\xff\x95\x26\x87\xc7\x2d\x7a\xeb\x55\x0c\xaf\xce\x01\x21\x38\x3b\xbf\x59\xc8\x6f\xf1\xa7\xab\xdb\xeb\xc5\x87\xf3\x9f\xce\x17\x67\xb8\x99\x66\xb5\xbc\x9e\xc1\xfb\xe5\x6a\xb5\xbc\xc4\x9c\xfd\xc0\xe7\x5d\xd4\xb6\x15\x65\x61\xb5\xbc\x46\xfd
\x5e\xde\x95\x71\x95\xf8\xb0\xab\x47\x16\xfe\x35\x0b\xe3\x9c\x3d\xa2\x3e\x66\x7c\x6f\x1a\xe4\xa2\xe6\xdf\xfe\x42\x7f\x3f\xaf\x9a\xed\xa6\x0e\x7f\x3e\xfa\x77\x41\xfc\xa2\x8e\x5c\xe1\x4c\xc1\x67\x8c\x6e\x0f\x76\xe4\xfc\x08\xe5\xcb\x77\xc9\xf2\x2d\x8e\x8b\x72\x33\xe3\xdd\x34\x2f\xff\xbd\xce\x7a\x4a\x23\xad\x66\xbe\xbb\xa2\xbb\xbf\xec\x84\xad\xbb\x46\x8c\x15\xfb\xe4\x85\x03\x52\xf5\x20\x4b\xdf\x19\x8b\xf3\xe8\x45\x9e\x82\x48\xd0\x01\xd5\x7c\x5b\x06\xce\x9f\xb7\xe1\x7a\x5b\x71\x4b\x8d\xe3\xfe\xfb\x20\xc5\xdb\x6c\x8d\xf5\x82\xfb\xa1\x06\xd0\x3a\xb3\xf3\xe5\x62\xf5\x71\x79\xa6\x4c\xcd\xe5\xbf\x89\x53\x99\x28\xeb\x65\xc5\xf9\xaf\xb5\x91\xf3\xab\xea\x6f\x71\x22\xb3\xf8\xfb\x62\xbe\x5a\xdc\xae\xa6\x9d\xcd\xbb\xf7\x46\xa9\x8e\x4e\xfc\x5a\xf7\x02\xa9\x1a\x32\xa7\x6d\xdd\xb7\x94\x6a\xf2\x31\x18\xd5\x94\x08\x5b\x90\xbe\xcc\xf1\x48\xb3\xfd\x9a\xfd\x56\x73\xf5\xd7\x5e\xb3\x5b\xb4\x49\x8d\xb6\xed\xf7\xd1\x0b\x04\x45\x0f\x37\x4f\x0a\x41\xf0\x80\xd9\x7b\x56\xc0\x3c\x9f\x1d\xf9\x84\x73\xc8\xcd\x1d\x92\x24\xba\x9d\x4e\x07\xbb\xf9\x0a\xd2\xa9\x60\x03\x22\xb8\x26\x7c\x71\xfb\xe2\x01\x1a\x58\xa1\x7b\xb1\x1f\xae\x2e\x09\x7c\x0c\xb2\xd7\x84\xea\x15\x5f\x3b\x94\xd5\x41\xa2\xb7\x24\x33\x51\x16\xc7\x68\x5e\xf9\xc9\x43\x99\x1c\x22\x79\xdb\x64\x2e\x6e\x88\x74\x28\x5e\x57\x10\xaa\x7a\x86\xa5\x87\xc8\x45\x59\xac\xd9\x8f\x01\x1a\x17\xb7\xff\xac\x59\x9a\x01\x12\x17\x37\x48\xbb\xf8\x8f\xe5\x69\x1e\x3b\x02\xd7\xcd\xec\x66\x45\xde\x1e\xf5\xb3\x67\x4d\xdd\x1a\x11\xb7\x58\x47\xc7\x30\x6d\x5b\x9c\x4d\xc3\x0d\xdd\x21\xd2\xb6\x22\x67\x51\x26\xc7\x28\x5b\x0a\x66\x37\x46\xd8\x96\x91\x6b\x94\x51\xf7\x74\xad\x7b\xb2\xd6\xfd\xd1\x0a\x03\xa2\x96\x7e\xb4\x62\xf0\x58\x45\x45\xc7\xa2\x3f\x83\x4d\x92\xb6\x8f\x8c\x45\x19\xed\x1c\xcc\xd0\x52\xb1\xc8\x65\x45\x77\xfc\x68\x89\x58\xa4\x17\x32\x55\x8f\x66\x38\xa2\x61\xdd\x93\xb0\x26\x14\xac\xc5\x87\x75\xf8\xa4\x06\x76\x16\xed\xa7\x5f\x55\x9a\x15\x65\x57\x4b\xbe\xf6\x91\xac\xb8\xd7\x6a\xe4\x88\x46\xe5\x06\x21\xcc\x7f\x7f\x9f\x80\x60\x9d\x80\x5e\x9d\x9c\x5c\x9d\x8a\x5a\x9d\x8a
\x58\x9d\x94\x56\x75\x41\xaa\xe2\xfd\x5b\x34\x42\x95\x4a\xa7\x12\xc9\x54\x22\x95\x4a\x24\x52\xe9\x34\x2a\x99\x44\xb5\xa3\x50\x2d\x09\x54\x6b\xfa\xd4\x9a\x3c\xb5\xa6\x4e\xad\x89\x53\x6b\xda\xd4\x0d\x69\xea\x80\x32\x25\x13\xa6\x34\xba\xd4\x11\x59\x3a\x11\x55\x3a\x0d\x51\x6a\x40\x93\xa2\x97\xb2\x63\x24\x69\x49\x86\xa2\x8c\x8e\x52\xa4\x0d\x2a\x14\x65\xb8\x97\x20\x6d\x9e\x05\x40\x59\xec\xa1\x47\xb5\x34\x28\x72\x35\x5f\x93\xa3\xfd\x24\x28\xd2\x39\x2b\xa9\xd1\x89\x29\xd0\x51\x02\x94\x14\xbd\x1b\xa2\x3f\x3b\x34\x27\x72\x37\xd2\x26\x3f\xf5\x24\x27\x6e\x2b\x3a\x46\x7d\xf2\x5b\xa1\x6d\x70\xb4\xc4\xa7\x0d\xab\x38\x40\x7b\x76\xe8\x4d\x9c\xe1\x16\xe9\xa9\x25\x37\xe9\xcf\x6a\xd6\x4b\x6d\xe2\x5a\x59\x73\xaf\x6e\x89\xcd\x69\x68\x4d\x6b\x52\xd3\x9a\xd2\xb4\x25\x34\x09\x74\x26\x19\x6d\xb4\xa3\x32\x6d\x88\x4c\x37\x6e\x70\x0b\x12\xf3\xa8\x4e\x70\x4b\x0a\xd3\x80\xc0\x44\x8b\x5d\x0c\xd2\x97\x4d\x6f\x36\x6e\x1a\x1e\x26\x2f\x0b\x92\x12\xf7\xa9\x1f\xa1\x2e\x25\x45\x89\xf6\x2e\x4f\x42\x5c\x4e\x40\x5b\x9a\x90\x96\x62\x19\x81\x73\xdb\x3a\xa7\x2c\xc7\x09\xcb\xfa\x99\xe1\x5c\x76\x7d\x74\x65\xd3\xa5\x4f\x18\xa4\x1a\xf7\xbf\xea\xd2\xb7\xf0\x83\xb7\xa9\xca\xa2\x4b\x70\xf7\x3d\x4c\x54\x36\x08\x49\xf4\xc9\x31\x1d\x4d\x69\x1d\xaa\x18\x76\xd8\xd3\xa2\xeb\x2d\x8a\x52\xef\x63\xa7\x4c\x50\xce\x95\x26\xa7\x50\x99\xec\x20\x89\x0e\x49\xc8\x69\x28\x48\x87\x04\x24\xde\xe9\x49\x22\x1f\xa9\xd4\x23\x91\x78\xb4\xa3\x1d\xad\x48\x47\x1a\xe5\x48\x23\x1c\x89\x74\x23\x95\x6c\xa4\x51\x8d\x64\xa2\xd1\x8a\x66\xb4\x23\x19\x6d\x29\x46\x5b\x82\xd1\x96\x5e\xb4\x25\x17\x6d\xa9\x45\x17\xc4\x22\x91\x56\xcc\xd5\xbd\xc3\x45\x10\x3f\x1e\x82\x47\x66\x32\xc7\x21\x37\x4b\xad\x4d\xd2\x4a\x7f\x61\x03\x43\xea\xbe\x43\x62\x7c\xd5\xb9\x99\x07\x96\xab\xa7\xe5\xc7\x67\xf7\x43\x1c\xe6\xcb\x27\x96\xa6\xe1\xe6\x08\xf7\xfd\xa9\x71\x35\xec\xcd\xf2\xa5\x21\x6f\x2d\x5f\xdc\x88\xb5\x76\xe1\xb1\x92\x8b\x11\x71\xeb\x86\xde\x1e\x65\x59\x24\xce\xc9\xc5\xd2\xb1\x50\xe4\xd4\x10\x57\x11\x48\x40\x62\xde\x56\xfe\xfd\x7e\x91\x55\xc5\xb1\xd1\x60\x2d\x20\x82\x58
\xd0\x7c\xb2\x71\xc5\xf9\x46\x7e\x23\x92\xd4\x31\x32\x1b\x66\xb5\xbb\x36\x90\x7f\xff\x76\xc7\xaf\x73\x67\x8e\xa3\xa6\xec\xe1\xcb\xd3\x8f\x6f\x53\x96\xe5\x6f\x9f\x7e\x7c\x5b\x82\xa0\xa7\x72\xd1\x79\x56\x74\x73\x62\x4a\xab\x16\xce\x9c\x18\xee\x2e\x5b\x06\x06\x4e\x15\xe4\xec\x5b\x3e\x34\xc2\x0c\x8f\x9d\xb7\xdf\x24\xf6\xcd\xe8\xd7\x06\x2e\x85\x11\x53\xad\x37\x2f\x0d\x9e\x8b\xd7\x80\xaf\xd6\x77\x41\xfa\x75\x93\x3c\xc7\xb0\x09\xb3\x7d\x14\xc8\x10\x00\xfb\x96\x1f\x82\xe1\x93\xb6\x7c\x0c\x8f\xe8\x1f\xc9\x5b\x58\x27\xf1\x43\x14\xae\xf3\x41\x37\xd4\x09\x7c\x7b\xf9\xb0\x0d\xd2\xa1\xdb\x38\x81\xac\x74\x9d\x0c\xfe\xea\x3e\x0a\xe2\xaf\x83\xbf\x88\x92\xc7\xec\x3a\x88\xd9\xd0\x1d\x9a\x9e\x53\x2f\xfa\x61\x6c\x06\x42\xcc\x3e\xed\xd3\x99\xf2\x02\x23\x95\x3a\x13\x0e\x7f\x86\x65\xe3\x0a\x57\x42\xf1\x84\xc7\x96\xce\x05\x8b\x37\xd9\x0d\xfd\x64\x32\x83\x60\x87\xbf\x34\x6a\xe0\x0a\xd3\x90\xae\xed\xde\x0a\xb3\xa2\x0f\x72\xd4\x21\x9e\xf2\xe0\xce\x4f\xcb\x9b\xcb\xf9\xaa\xc5\x3a\xcd\x6f\xfe\x71\xb6\xfc\xe5\x6a\x06\x37\xf3\x5f\xc6\x8e\x6b\x98\xec\x56\x4e\x34\x97\x19\xad\x52\xb6\x62\xf4\x87\x37\xf3\x5f\xfa\x67\xc3\x30\x8f\x06\x3f\xb8\x86\x23\xa3\xb3\xb0\xc8\x07\xfb\x58\x79\x6e\x4b\xf1\xbf\x41\x24\x3f\x49\xa2\x45\xa5\xe7\xe0\x59\x8c\x89\xfe\x11\x5e\xcc\x33\xee\xe7\xf3\x5f\x47\x27\x30\xcc\x98\x1e\xb7\xd6\x9a\xd5\xd7\xfc\xe7\xad\xb0\xdf\x68\xfa\x3e\xd4\x0c\x6d\x36\xfb\xe6\xc3\xdf\x22\x57\xd3\xb3\x49\x0e\xce\x13\x71\xff\xb7\xc3\x21\x71\xe3\x79\x9e\xf7\xaf\x1c\x77\xa3\x01\x15\x04\x87\xd6\x9e\xec\x1b\x57\x71\x38\x43\x16\xa3\x09\x61\x5d\x19\x5d\x67\xf2\xc3\x01\x89\xac\x2a\xbc\xc4\xfc\x5d\x13\x7d\x32\xb6\x52\xc7\x00\x5f\xbb\xe4\x18\x6b\xf9\xcb\xc4\x68\x5d\x6c\xd9\xc1\xfc\x2a\x86\xa1\x99\x2e\x95\x20\xdf\x66\xde\x1d\x1a\xa9\x83\xcb\xe5\xd9\x02\xaf\x72\xf0\x61\x79\xb1\xbc\x99\xc1\xaf\x5f\x6e\xe6\x9f\x67\x70\xbb\x9a\xaf\x6e\x4d\xce\x0d\x9a\xba\xce\x4e\x3a\xcd\x32\xaa\x24\x5a\x65\xf4\x4b\xd1\x70\xa3\x5f\x8a\x7b\x1b\xfc\x65\x39\x2f\x98\xbd\xc8\x26\x07\x9e\xda\x42\x21\xe3\x13\x0f\x0c\x6b\xf8\x88\x8d\x62\xb5\x62
\x93\x38\x44\x98\x99\xbd\x72\xa4\xb0\x33\x25\xd4\x6c\x18\x5e\x46\x71\xb9\x84\xd7\xce\xb0\xb7\x31\x99\x9b\x4f\xda\x2e\x96\xd1\x1a\x38\xb0\x35\x62\x8f\x2c\xde\xac\xd8\x6e\x1f\x05\xb9\xd1\x9c\x47\x08\x73\xb7\x06\xe5\x85\x72\x4d\x23\x0b\xea\xcc\x34\x17\x61\x58\x5e\xb9\xdc\x40\xf2\x2f\x41\x1c\xec\xf8\x9f\x77\xb5\x6b\xc8\x34\x7a\xd0\x61\x90\xc4\xa0\x2f\x0e\x48\x85\x19\x64\xdb\xe4\x10\x6d\x44\xf0\xc6\xd0\x62\xd1\x2c\x11\x7e\x14\x64\xec\x3e\x89\x8a\x78\x5f\xb1\x3e\x14\xf1\x9e\xbb\x7f\xfb\x9f\x28\xb8\x67\xd1\x17\xde\x31\xff\x7b\x67\x1a\x44\x6d\x48\x2e\xa4\x2c\x4b\xa2\x27\x56\xf2\x9f\xc2\xda\x9b\x37\x99\x9c\xa3\x4f\xc1\x64\x66\xdd\x85\xf1\x9c\x02\x36\xdb\x0e\x83\xcb\xce\x75\xf1\x43\x41\x5d\x80\x47\xc9\x33\x4b\xe1\x3e\x39\xc8\x70\x27\x22\x44\x57\x44\x9d\xf9\x7b\xc9\xe2\xf5\x4b\xb1\xb2\x08\xb3\x6a\x24\xcc\xc4\xa9\x30\xc6\xdb\x6a\x7c\x22\xea\xfe\xa5\x08\x39\x55\xe7\x19\x77\x61\x1c\xee\x0e\xbb\x86\xcc\xae\x8c\x51\x99\x93\x7e\x87\x8c\xc9\x98\x79\x73\x79\x2d\x7d\x98\x3f\x25\x29\xb0\x6f\x01\x6f\xe6\x0c\x42\x4c\x6c\xb6\xcc\x08\xbe\x3f\xdc\x47\x61\xb6\x65\xbc\xf7\xd6\x0c\xd8\x13\x37\xfb\xc3\x3b\xde\xec\x43\xce\x04\xce\x6d\x6a\xf2\x6e\x17\xc6\x5f\x34\x88\x79\xfd\x26\x95\x90\xf1\x0f\xa6\xae\xf2\xa2\x15\x22\x2b\xc4\xb3\x30\x13\x27\x39\xec\x82\xaf\xbc\x1f\xe2\x8c\xd5\xfe\xdb\x20\x36\x7d\x44\xa2\x85\xf2\xfe\x83\x5c\x9c\x8c\x94\x57\xa9\x70\x76\xa3\xf3\x5f\xfb\x28\xc9\x57\xfc\x6d\x38\xca\x7b\x73\x5d\x5c\x0d\x57\x17\xff\x15\x2b\xaf\x63\x7c\xae\x47\xe7\xd2\xa8\x5e\xa1\xea\xc1\xf3\xce\x32\x7f\x87\x0a\x10\x51\x2e\x37\xba\x4b\xd0\xeb\x8b\xe5\xea\xcb\xea\xf3\x35\x61\x1d\x0a\x70\x71\x7e\xb5\x10\x2b\xd0\x0f\xff\x58\x9c\x7d\x99\xdf\x2c\xe6\xf5\xff\x7b\x3f\xbf\x99\xc1\xc7\xc5\x7c\x75\x39\xbf\x36\xc3\x5a\xcc\x63\xbb\x27\xfa\x66\x1b\xd6\xe5\xad\x36\xfc\x69\xf3\xd6\x90\x55\xde\xcf\xcd\xc2\xb0\x27\x65\x27\x99\xec\x96\xd4\x95\x0c\xe6\x6d\x41\x68\xa9\x0c\x07\x8a\xa6\x7e\x69\x28\x97\xeb\x59\x7d\x17\x87\x86\xf9\xa7\x48\x4c\xf0\xe2\x23\x62\x7e\x34\xa4\x09\x96\x3e\xa4\xc9\x4e\xbc\x47\xb7\x79\xb0\xfe\xba\x49\xc3
\x27\x96\x16\xaa\x98\x19\xcc\xaf\xcf\x0d\x65\x2c\xd1\x92\x29\x39\x51\xf4\x92\x24\xa1\x33\xf0\xe8\x91\xa2\xa6\xee\x9e\x3f\xb9\x09\xca\x98\x90\x75\x61\x1f\xa4\xc1\x8e\xe5\x2c\xcd\xd4\x50\x25\x22\xcc\x8b\x3d\xd3\x6c\xbe\x51\x29\xcb\x49\xa1\x2c\x67\x5c\x81\x22\xc4\x03\x4d\x32\x84\x72\xac\x18\x39\xb0\x40\x23\xc5\x43\x4c\x70\x36\xdd\xd8\xa2\xb6\x48\x19\x6a\xef\x5f\x60\xc3\x1e\x82\x43\x94\xcf\x0a\x7d\xa5\x67\x4a\xc6\xa6\xe6\xe4\xa3\xa0\xc8\x9f\x84\x96\x67\x79\x9c\x1d\x69\xb5\x21\x6c\xb1\xe3\x3b\xb5\x7d\x2b\x37\x0a\x3e\x3f\x47\x9d\x0c\xec\x29\x64\xcf\xd5\xe6\x6c\xcc\x5f\xdd\x2d\xb4\x51\x0c\x76\xa2\x52\xb2\xb8\x49\x61\x64\x2b\x30\x25\x8b\x81\xcc\x14\xda\x66\xcd\xc1\x64\x83\x62\x53\x04\xc3\x4d\x79\x2a\x8d\xe4\x14\xc1\xe2\x71\x44\xaa\xca\x6b\x0d\x4a\x55\x11\x2c\x56\xe2\x56\xc3\x82\x55\x04\xcb\xa5\xe4\xd1\x98\x6c\x15\xc1\xb4\x24\xf0\x86\xc4\xab\x08\x46\x5b\x72\x57\x1a\x09\x2b\x82\xd1\x7e\xd1\x2b\x31\x27\x12\x2c\x3a\x95\xbe\x92\x65\x5c\x00\x8b\x32\x54\xb3\x29\x64\xb0\x74\xb6\xbb\x62\x58\x14\x93\xb4\x64\x78\x76\x22\x5a\xb2\xb8\x9c\xcd\xad\x04\xb5\x5a\x16\xa7\x5c\xae\x58\xcb\x69\xc9\x62\x24\xaa\x45\xf9\x4a\x24\x23\xd2\x5a\xd4\xb9\x3b\xe8\x17\xd8\x22\x7d\xcc\x6a\x49\x2e\x8d\xcc\x16\xd1\x62\x5b\x98\x4b\x15\xdb\x22\x18\xed\x97\xe7\x2a\xbc\x3a\xb4\xaf\xee\x80\x48\x17\x29\x13\x1a\x38\x96\xea\x92\x65\x9a\x5c\x68\xa3\xb2\x5d\xc4\x67\xaf\xcb\x2d\x56\x12\x3c\x14\x93\x5a\xb9\xaf\x5a\xc2\x8b\xb2\xac\x18\x14\xfd\x52\x3b\x82\x32\x56\x7b\xa4\xbf\x1a\x23\x8d\xb2\x9c\x1d\x14\x00\x93\xa2\x5e\x04\xb3\xee\x65\xc0\x64\x71\x2f\x06\x26\x8b\x73\x5c\xa9\x68\xee\x04\xd0\x92\x2c\x1d\x8e\x48\x95\x07\xa3\x3d\x32\x45\x50\x4c\x2f\x12\x46\x6d\x6a\x57\x56\xac\xc1\x32\x51\x5e\xb6\x41\x71\xb1\xd2\x27\x4f\x30\xec\x5e\x62\x4c\xb1\xeb\x52\x68\x4c\x31\xdc\x2b\x37\x66\x69\xb2\x10\x28\xd3\x8b\x8e\xd1\x6d\xab\x32\x65\x7a\xe9\x31\x27\xd6\xff\xfa\x4e\x67\xfd\x9d\x8d\x75\x8d\x64\x99\x13\x19\x32\x59\x28\x59\x09\xe9\x92\x64\xad\xda\x84\x6c\x79\x64\x79\xb2\x76\x65\x74\x8a\x3f\xb2\x54\x59\xbb\x32\x25\xe3\x9e\x85\x6c\x99\xae\x3a
\x25\x5b\x9e\xb5\x84\x59\xcb\x88\x4d\xe6\x3e\x07\x72\x66\x03\x66\xb0\x79\xf3\x1c\x48\x9b\xf5\x9b\x41\x67\xf1\x73\x25\x73\xd6\xb2\x65\x9b\x14\x90\x2c\x79\x26\x0b\x4d\xf8\x4c\x16\x47\xf2\x67\xb2\x8c\x8a\xa0\x11\x66\xfa\x52\x36\x6d\x48\x0a\xcd\x72\x3d\x78\xaa\x17\x44\x23\x6d\xb8\x5a\x12\x6a\x56\x7a\x44\xb2\xf4\x09\xa9\x55\xe2\x68\x94\x35\x9b\x63\x39\x35\x59\xc6\x44\xd5\xa8\xae\xf8\x21\x69\x35\x9a\xab\xa1\x29\xc6\xd6\x27\xb0\x66\xb7\x95\x95\x0e\x91\x8e\xcc\x1a\xa5\xb9\xee\x85\xd9\x64\x19\x95\x67\xb3\xd8\xc4\x0e\x89\xb4\x51\xc6\x6b\x4b\xd6\xad\x2d\xd5\x46\x69\x69\x8f\xb8\x5b\x35\x72\x29\x9e\xfc\x01\x89\x37\xa2\x72\x9a\x2c\xae\x85\xde\x64\x19\x97\x7b\xb3\x88\xca\x6d\xfa\x44\xdf\x08\x36\xd5\x09\x40\x23\xfd\x46\xf4\x64\xb5\x35\x2b\x5b\x02\x70\x24\xcf\x53\xbf\x64\x1c\x9c\xe3\x72\x22\xc8\x32\x81\x70\x9c\x2c\xd6\xf2\x71\xb2\x58\x8b\xc8\x35\xcd\xd0\xa5\xe4\x64\x21\x08\xca\xc9\x62\x15\x5f\xb1\x15\x97\x93\xc5\x46\x62\x4e\x16\x97\x41\x22\x0b\xb9\xb9\x96\xbd\x29\x43\x44\x96\x62\x73\xb2\x18\x48\xce\x51\xe6\x2c\x99\xa5\xb6\x4f\x78\xce\x66\x6e\xe9\xa4\x6f\x29\xe5\xe7\xa8\x11\xf8\xa6\x60\x5d\x57\x84\x8e\x64\xb5\x2b\x5b\xa7\x48\xd1\x51\xd6\xc1\x0e\xc5\xeb\x64\x99\x40\xc2\x4e\x96\x31\x21\x3b\x4a\x48\xbf\x94\x12\xee\x91\xb3\x23\x98\x54\x04\xf0\xba\xa2\x76\x94\x47\xd4\x2b\x83\xd7\x08\x97\x51\xfc\xd6\xbd\x62\x78\x24\x39\x56\x59\x5c\x4a\xe2\xc9\x32\x2a\x8c\x47\x5c\xa6\x8c\xc8\xe3\x11\xac\xd6\x82\x7a\x5a\x91\x3c\x87\x81\xc1\xae\x54\x1e\x79\x57\xd1\x08\x86\x69\x04\xf3\x68\xef\x7f\x6f\xe4\xaa\x11\x87\xa2\x9d\x13\x32\x10\x6d\xa7\xad\xda\x86\x52\xda\xc0\x3d\xa9\x7b\x27\x10\xdd\x93\x65\x1a\xe9\x3d\x59\x1c\x0a\xf0\xc9\x42\x75\xf7\x93\xc4\xf8\xd4\xca\x58\x49\x3e\xb5\x36\x52\x98\x4f\xad\x4c\x93\xe7\x53\x6d\x90\x44\xfa\x54\x13\xb4\x80\x05\x45\xb0\xaf\x55\x97\x16\x66\xa1\x89\xf7\xa9\xb5\x69\x91\x12\xa2\x90\x9f\x5a\xdd\x2a\xd0\x61\x23\xea\xa7\xda\xb0\x0d\x73\xd8\x09\xfc\xf5\x5b\xa1\x84\x15\xec\xc4\xfe\x7a\xad\x90\x42\x1c\xf6\xc2\x7f\xda\xf6\x98\xca\xff\xd5\xe5\x01\xc9\xf7\x80\x13\x85\xf7\x42\xc8\x09\x07
\x9c\xc0\xb0\xe0\xc1\x6f\xf5\x11\x6d\x64\x37\xca\x3e\x30\xd7\x94\x7b\xfa\xf1\xad\xac\x62\x28\x1a\x57\x17\x79\x6e\x7d\xc3\xe2\x5c\x1e\x8b\x17\x6c\xb1\x58\xd9\x95\x19\x2d\x4a\xd7\x19\xda\x63\xca\x3f\xe4\xa5\xba\x1d\x5f\x17\x08\xf4\x0c\xf3\x89\xdd\x87\xeb\xaf\xed\x8d\xfc\xeb\x20\x3a\xd7\x9a\x96\xd0\xec\x39\xf7\x6c\x58\x37\x4d\x1d\xc0\x41\xfc\x95\xef\x41\xef\x03\x71\x46\x95\xec\x81\x95\xa3\xf1\x58\xe0\xcb\x26\x4c\xd9\x1a\x0f\x70\x81\x63\xff\xd7\x59\xd9\x8c\xef\xc8\xf1\xa5\x1b\x1e\x55\x3b\x1d\x9c\x8d\xfe\x28\x5d\x56\x87\x8c\x15\x90\x97\x18\x3f\xb4\x8d\x55\xc6\x22\xb6\xce\x3b\x29\x95\x60\x1f\x64\xb4\x38\x60\x9a\x1c\x1e\xb7\x52\x23\x43\x8e\x47\x17\x47\xcc\xce\xce\x6f\x16\x72\x05\xa2\xe8\xe1\xad\x96\xd7\x33\x78\xbf\x5c\xad\x96\x97\xc7\x3b\x35\xa4\x6d\x0b\xc1\xce\x6a\x69\x42\x5d\xb7\x6b\xc9\xbb\x45\x56\x8c\x0f\xbb\x7a\x3c\x52\xdf\xd7\x30\xce\xd9\x23\x61\xff\x5e\x08\x41\xf2\xfa\x7f\xfb\x8b\xed\xeb\x7e\xd5\xbc\x13\xbb\xd7\x88\xbf\x45\xbb\x20\x7e\xb1\x8d\x7c\x0b\x27\x97\x7c\x23\xf9\x4b\xa3\x7b\x05\x90\x66\x8b\x17\xfa\x92\xe5\xdb\xd7\x26\x0a\x6f\x9a\x4d\xf9\xce\xa7\x58\xa5\xad\x0e\xa6\xd9\xbb\xe2\x39\x7c\xd9\x09\x8b\xe8\x04\xc7\xb2\xb4\x43\xf7\x6a\x50\x40\xba\x32\x59\x4c\x3b\x72\x20\xb2\x15\x26\x22\xa1\x5a\x8d\x8f\xd4\xf2\x42\x04\x8b\x2a\x29\xb3\x0f\x0a\x78\xd0\xfa\x15\x29\xc0\xbb\xa6\x93\xd1\xc1\x67\xe1\x72\xb1\xfa\xb8\x3c\x6b\x69\xa4\xca\x7f\x13\x07\x90\xe9\x26\xc5\xa1\xe3\xf2\xef\xf3\xab\xea\xef\xdb\x4f\x97\x16\x56\x2f\xe6\xab\xc5\xed\xea\x78\x9f\xaa\x6e\xff\xd0\x8d\x10\x1d\x30\x75\x7f\x5a\x54\x26\xf9\x9c\xea\x27\x46\xaf\x2c\x1f\x18\xa2\x7e\xa6\xc9\xb4\xf5\x3a\xdb\x27\x5d\xce\xaf\xef\x64\xfb\x64\xdd\x34\x35\x12\xbc\xdf\x47\x2f\x10\x14\x5d\xdf\x3c\x25\x87\xdd\x3b\x3f\xe4\x2c\x85\xbb\x86\x81\xbb\xc6\xd9\x2b\x2f\x28\xe0\x05\x05\xf0\x16\xbd\xa0\x80\xbe\x78\x41\x01\x2f\x28\xe0\x05\x05\x0c\xca\x1f\x46\x50\x40\xb7\x28\xf0\xca\x02\x5e\x59\xc0\x2b\x0b\x78\x65\x01\xbc\x49\xaf\x2c\x50\x17\xaf\x2c\x20\x9b\xeb\x95\x05\xbc\xb2\x80\x62\xd7\x2b\x0b\x78\x65\x81\xe1\xe2\x95\x05\x08\x95\xbd\xb2
\x80\x57\x16\x18\x35\xe3\x95\x05\x94\xe2\x95\x05\xfa\x8b\x57\x16\xf0\xca\x02\x5e\x59\xc0\x2b\x0b\x78\x65\x01\xaf\x2c\xe0\x95\x05\xbc\xb2\xc0\x60\xf1\xca\x02\x7f\x00\x65\x01\x5d\xac\xc8\x4b\x0c\x78\x89\x01\x2f\x31\xe0\x25\x06\xbc\xc4\x00\xae\x78\x89\x01\xf0\x12\x03\x55\xf1\x12\x03\xa2\x78\x89\x01\xaa\x0d\x2f\x31\x40\xa9\xed\x25\x06\xbc\xc4\xc0\xa8\x95\xdf\xaf\xc4\x40\x3b\x91\xe8\x0d\x5f\xfe\xbd\x6e\x36\x51\xd1\x04\xbc\x31\xe7\x5b\x59\x5c\x3b\x94\xfd\xe9\xb5\x26\xa1\x68\x00\x62\x69\x0d\xf7\x2c\x7f\x66\x28\xaf\x58\xfe\x9c\xa8\xd9\x18\xa5\xb6\x81\xf9\xb7\x93\x86\x3c\x6c\x58\x9c\xec\xc2\x38\xc8\x93\x57\x92\x19\x38\xab\x1b\x40\x33\x33\xcd\x90\xa0\x36\xab\x13\x80\x6b\x74\x70\xb9\x10\x17\x43\x04\xb7\x28\xc2\x67\x92\x05\x4a\x36\x59\xb0\x43\x67\xa8\xcc\x15\xd8\x8c\x27\x70\x97\x5d\x16\x8e\x3a\xae\x6c\x5a\x39\x45\xd6\x59\x30\xc9\x3c\x4b\xf1\x97\x08\x0f\xf7\x48\xf6\x59\xa2\x1f\xc6\x59\x06\x5a\xb0\x1a\xf9\xe0\x02\x1c\x03\x7b\x2f\xb8\x73\x80\x0c\x26\x83\xc8\x60\x0c\x24\xa3\xda\x2c\xf0\xb3\x1e\x98\x8c\x68\xb5\xf2\x1a\x1f\x13\x28\x83\x49\xa0\x32\x18\x01\xcb\x88\x26\x35\x70\x96\xf5\xa9\x7c\x90\x9e\x21\x15\x49\x6b\x00\x66\x44\x93\x3d\x58\x9a\x80\xcc\xdc\xdd\xbd\x15\x9a\x06\x93\xe0\x69\x30\x86\xa8\x51\x07\x69\x36\x80\xa9\x11\x6d\x96\xdd\xa8\x43\xd5\x2c\x4d\xf6\x77\x2c\x7d\x76\x88\x5d\x45\x73\xdd\x60\x6b\x30\xc1\xb7\xc4\x01\xbe\x06\xaf\xb5\xc0\x72\x44\xb2\xc1\x84\x34\x1b\x4c\x44\xb4\xc1\x34\x54\x1b\x8c\x90\x6d\xe4\x96\xf6\xd3\x6d\xd4\x19\xaf\x60\xe2\x34\x84\x9b\xd5\x44\x52\x72\x71\x0a\xe5\x46\xed\x49\x1d\x1b\x57\x92\x6e\x44\x9b\xbd\x7c\x1c\xed\xc4\x22\x14\xc1\x64\xe7\x8c\x1c\x0c\x73\x72\xd4\xa6\xc6\x9b\xbe\x48\x18\xb5\x3f\xb5\xb1\xbf\x8a\x97\x23\x5a\xad\x28\x3b\x0d\x33\x47\xbd\xf5\x3e\xd2\x8e\x16\x4f\xe4\x65\x80\xb6\x73\x32\xe2\xdb\xc4\x1d\xd1\x68\xcd\xe9\x4d\x46\xdd\xc1\x28\x79\x47\xde\xb8\xb4\x82\x9d\x4e\xe9\x3b\x98\x94\xc0\x83\x61\x0a\x8f\x68\x31\x4e\xf2\xe9\x48\x3c\x98\x8a\xc6\x83\x51\x22\x8f\x68\xb4\xe0\xf8\xfa\xa9\x3c\xa2\xdd\x06\xcb\xd7\x47\xe6\xd9\x59\x2e\x78\x3e\x95\xce\xb3\x33
\xd9\x60\xfa\x74\x84\x9e\x9d\x71\x95\xeb\xd3\x53\x7a\x76\x57\x50\xd9\x3e\x3d\xa9\xe7\xec\x0a\xef\xfe\xda\x25\x16\x6b\x5a\xcf\x72\xd0\xd8\x33\x7e\x40\x8e\xf7\x83\x35\xeb\x07\xd6\xbc\x1f\xd8\x32\x7f\x60\xcb\xfd\x81\x2d\xfb\x07\x0e\xf8\x3f\xb0\x67\x00\xbb\x26\x68\xb1\x68\x8d\x1d\x5a\x68\x1c\xdc\xf1\x80\xe0\x8e\x09\xec\x35\x85\x8f\x4e\xf7\x99\x22\x84\xcb\xfb\x4c\x11\x62\xe6\xe0\x98\x11\x04\x97\x9c\x20\xd8\xb3\x82\x60\xc9\x0b\x82\x6b\x66\x10\x46\xb9\x41\xba\x37\x63\x22\x76\x10\x26\xe3\x07\x61\x8c\x21\xa4\xae\x08\x53\xd6\xcb\x11\x52\x7d\xce\x05\x7d\xd8\xcb\x12\xda\xfa\x0d\x4e\xb5\x3c\xa1\xfd\x7e\x4f\xc3\x14\xda\x2c\x89\xeb\x03\xd9\x2d\xae\xd0\xc6\x43\xde\xa5\x11\xa9\x21\x55\xa8\xf7\x76\xce\x88\x44\x98\x92\x4a\x84\x11\x32\xd1\xc5\x86\xbf\x4b\x27\xd2\xe7\x18\x79\xab\xfd\x84\x22\xd5\xb2\x4c\x05\xd2\x47\x29\x52\xdd\x3d\x25\x95\xdb\x47\x2a\x5a\xec\x95\xeb\xc3\xc3\x3a\x5a\xd1\x62\xe2\x6a\x50\x79\x03\xc4\x22\x7d\xa8\xa9\x8d\x6f\x50\x8b\x0e\xe6\x1a\x3d\xb9\x68\x31\x26\x2a\x4d\x43\x17\x68\x39\x34\x8e\xcd\x94\x5f\x85\x16\xc1\xe8\xe4\x75\x73\xef\x49\x6a\xb2\x8f\xd6\x02\x8f\x40\x17\x79\x94\xc5\x11\x03\x09\xce\x38\x48\xb0\x61\x21\xc1\x45\xf4\xce\x0d\x13\x09\x4e\xb8\x48\x98\x20\x1c\x69\xcd\x47\xc2\x6b\x05\x23\x9d\xa0\x92\x30\x19\x2e\x09\x23\xc8\x24\xdd\x69\xde\x8b\x4d\x52\xdf\x91\x02\xb6\xd4\xa3\x93\x0e\x8e\x5d\xcc\x5a\xf8\x24\x39\xc6\xd3\x86\x2e\xad\xe8\x53\x90\x1e\x66\x0d\xcd\x23\xe2\xc7\x54\x67\xb8\x7b\xf8\x12\xa6\x01\x30\x61\x18\xc2\x24\x5a\x14\x22\x95\x7d\x20\x26\x75\x8d\xfe\x50\x4a\x75\xf7\xc0\x98\xf6\xa1\xdd\x0e\x90\xf9\xc7\x08\xed\x3a\x96\x40\x85\xf6\x8a\xa9\x3a\x38\xd9\x00\x34\x2d\x4e\x49\xb4\xc3\xb0\x9d\xd0\xb4\xdd\x00\xeb\x0d\xa6\x52\x9f\x99\x63\xbc\x13\x46\x10\x4f\xf2\xdc\xf2\x7f\x25\x56\xaa\xe2\x9e\xf4\x1e\x56\x8e\xb6\xe9\x91\x4f\x57\x4d\x9e\x2a\x4c\x7a\xf4\x00\x92\x05\x34\x0a\xd6\xe0\x28\xd8\xc2\xa3\xe0\x08\x20\x05\x37\x10\x29\x58\x82\xa4\x60\x09\x93\x82\x2d\x50\x0a\xd6\x50\x29\x58\x82\xa5\x60\x0f\x97\x82\x1b\xc0\xb4\x6b\xc6\x26\x92\xe6\x02\x34\x05\x67\xb0\x69\x9f\x25\x6a
\xc0\xca\x05\x74\xda\x63\x89\x1c\x44\x73\x05\x9f\x6a\xda\x85\xcf\x71\x0d\xc4\x3c\xd7\xe0\x58\x57\x89\x90\x2e\x18\xa6\xca\x79\x0d\x47\xcd\x7b\x0d\x26\xb9\xaf\x09\x36\xeb\x6c\xd9\xf6\xf9\xaf\xe3\xc3\x8e\x6f\x6d\x5e\x8b\x46\xbd\x2a\x2f\x4f\x33\x32\x8d\x17\x89\xd6\xa8\x4e\x48\xb7\xea\x5a\xcf\xa1\x0e\x94\x3f\x08\x87\x5a\x8d\x29\x4f\xa1\x7a\x0a\xb5\x5d\x3c\x85\xea\x29\x54\xa5\x78\x0a\xd5\x53\xa8\x9e\x42\xf5\x14\x2a\xa5\x55\x7f\x48\x0a\x55\xb7\xbc\xf2\x0c\xaa\x67\x50\x3d\x83\xfa\x47\x0b\x54\x7a\x06\xd5\x33\xa8\x9e\x41\xad\x9b\xed\x19\x54\x4d\x93\x3d\x83\xea\x19\x54\xd5\xb8\x67\x50\x8d\x2f\xe3\x19\x54\xcf\xa0\x76\x6c\x78\x06\x75\xd0\x94\x67\x50\xcd\x4d\x79\x06\x55\x5b\x3c\x83\xea\x19\x54\xcf\xa0\x7a\x06\xd5\x33\xa8\x9e\x41\x95\xc5\x33\xa8\x8d\xe2\x19\x54\xcf\xa0\x7a\x06\xd5\x33\xa8\x08\x9b\xc7\x0b\x45\x7a\x02\xd5\x13\xa8\x9e\x40\xf5\x04\xaa\x27\x50\x89\x4d\xf5\x04\x6a\x59\x3c\x81\xfa\x7f\x23\x52\xea\x09\x54\x4f\xa0\x7a\x02\xb5\x5d\xdf\x13\xa8\x9e\x40\xed\xb7\xe4\x09\x54\x33\x4b\x9e\x40\xf5\x04\x6a\x55\xbe\x77\x02\x75\x1f\xae\xbf\xb6\x1d\x29\xaf\x03\xa3\x5e\x6b\x5a\x42\xb3\x37\x8d\x67\xc9\xba\x7d\xea\x88\x0e\xe2\xaf\x7c\x93\x7e\x1f\x88\xa3\xe7\xe4\xa5\xb9\x1c\x9a\xb8\x45\x1c\x9d\xb9\xdb\x84\x29\x5b\xdb\x90\xa6\x6e\xa6\x94\xb3\xb2\x19\x74\x43\xc7\x1b\x23\x55\x63\x49\x6e\x47\x95\x7d\xf8\x28\xdd\x84\x87\x8c\x15\xd0\xa9\x18\x44\x94\x69\x29\x81\x8c\x45\x6c\x9d\x77\xa3\xc4\xfb\x20\xa3\xec\x0f\xf3\x6d\x9a\x1c\x1e\xb7\xa2\x5d\xc5\xa0\xac\x8e\x5f\x96\x07\x29\x09\x66\xcf\xce\x6f\x16\x72\xfd\xf2\xe9\xea\xf6\x7a\xf1\xe1\xfc\xa7\xf3\xc5\xd9\x0c\x56\xcb\xeb\x19\xbc\x5f\xae\x56\xcb\x4b\xfc\xf9\x37\xda\xe6\xe5\x44\xdf\x16\x82\x9d\xd5\xf2\x9a\x50\x4b\xde\x2d\xb2\x62\x7c\xd8\xd5\xe3\x91\xfa\xd2\x86\x71\xce\x1e\x09\x9f\xf0\x87\x24\xdd\x05\xb9\xa8\xff\xb7\xbf\xd8\xbe\xf3\x57\xcd\x3b\xb1\x7b\x8d\xf8\x5b\xb4\x0b\xe2\x17\xcb\x43\x26\x79\x02\x41\x14\xc9\x37\x92\xbf\x34\xba\x57\x00\x69\xb6\x78\xa1\x2f\x59\xbe\xa5\xd0\xcc\x2e\xe7\xd8\x9b\x66\x53\x7e\x0f\xf3\xac\xd2\x60\x07\x73\xed\x5d\xf1\x30\xbe\xec
\x84\xc5\x3b\x5a\x9c\xb5\x7d\xa6\x43\x8d\xc4\x48\x5f\x2d\x8b\x69\xa7\x1b\xc4\x69\xaf\x44\x1c\xc5\xa8\xb9\xb0\xe7\x6d\xb8\xde\x0a\xce\x98\x60\x51\x45\xe1\xf6\x81\xa4\x93\xed\xdf\x93\x82\xcb\x6d\x7a\x4d\x1d\x7c\x1b\x2e\x17\xab\x8f\xcb\x33\xf5\xc3\x50\xfc\x1b\xf5\x74\x7e\x59\x7d\xfe\x6b\x6d\xea\xfc\xaa\xfa\x9b\x78\x32\xbf\xa8\x7d\x31\x5f\x2d\x6e\x57\xc7\xfb\x5e\x75\xfb\x87\x6e\x84\xe4\xdb\x39\x69\xf4\xa7\x45\x65\x82\x5b\xeb\xa4\xf1\xc4\xe8\x95\xe5\x03\x43\xd4\x97\x58\x7b\x90\xbe\xcc\xa9\xa2\x2c\xae\x76\x53\xb7\x9a\x96\x7c\x4f\xbb\x29\xeb\xf6\xa9\xb1\xf8\xfd\x3e\x7a\x81\xa0\xe8\x7f\x8b\x13\x99\xc1\x43\xce\xd2\x5a\x2e\x08\xc2\x4c\x4c\x84\x87\x1c\xeb\x28\xb7\xd0\xf3\xb1\xd5\x34\x71\xb9\x0a\xb0\xd5\x32\x99\x46\xc7\x64\x50\xc3\xa4\x52\x23\x21\x7b\x6b\xda\xfa\x25\xd5\x29\x02\x82\xc5\x63\x6a\x97\xb8\xd7\x2d\x19\xd2\x2c\xb1\x93\x9b\xd0\xeb\x95\x74\x95\x47\x08\xa6\x6b\xad\x92\x3e\xd5\x11\x82\xd1\xf3\x07\x08\x9c\x2b\x8e\xb8\x57\x1b\x19\x54\x1a\xa1\xad\x60\xfb\x55\x46\xca\x4e\xa0\x0c\x55\x8d\xc2\x88\x56\x2b\xc4\xda\x76\x57\x27\x84\x62\x92\x76\x52\xd2\x85\xaa\x88\xcb\xd9\xdc\x81\x9a\xc8\x2b\x2c\x0b\x1c\x29\x89\x4c\xa5\x22\x32\x85\x82\xc8\x04\xea\x21\x43\xca\x21\xb4\x63\x71\xbd\xaa\x21\x95\xfe\x07\xc1\x68\x57\x31\xa4\xa5\xfd\x41\xfb\x06\x57\xda\x09\x7a\xdd\x0f\xd2\xb4\x28\x86\x62\xbf\xe6\x07\xd9\xa3\xe4\xfe\x58\xd8\xc0\x91\x30\x08\x48\x87\xed\x7a\x74\x3e\x6c\x8e\x84\xe9\x35\x3e\x6a\xb5\x0e\xca\x22\xa3\xa3\xef\xd1\xaf\xd4\x41\x19\xab\xe9\x90\x4a\x87\x9d\x1c\xa4\x46\xa1\xa3\xa1\xb5\x41\x73\xed\x4c\xa2\xce\x31\xac\xcc\x41\x9f\x02\x27\x3a\x6b\x36\xdd\x39\xb3\x01\x35\x0e\x88\x13\xe2\x23\x9b\x46\x89\x63\x12\x15\x8e\x61\x05\x0e\x0b\x37\x5f\xaf\xfa\x46\x53\x47\x83\x6e\xb7\xad\xbc\xd1\xd2\xd0\xa0\x1b\x96\xaa\x1b\x1a\xfd\x0c\x4b\x93\x85\xe2\x86\x5e\x3b\x83\x6e\x5b\x55\xdb\xd0\xeb\x66\x38\xb1\xfe\xd7\x77\x3a\xeb\xef\x6c\xac\x6b\x54\x36\x1c\xea\x65\x50\xfd\xaf\x36\x3a\x19\x76\x1a\x19\x56\xfa\x18\x56\xda\x18\x56\xba\x18\xb6\x9a\x18\x96\x7a\x18\x2e\xb4\x30\x9c\xe8\x60\x38\xd2\xc0\x70\xa4\x7f\xe1
\x48\xfb\xc2\x91\xee\x85\x23\xcd\x0b\x97\x7a\x17\xce\xb4\x2e\x2c\x75\x2e\x6c\x34\x2e\x9c\xea\x5b\x0c\x6b\x5b\xd0\x98\x99\x89\x74\x2d\xa6\xd1\xb4\x18\xd4\xb3\x20\x82\xcb\x7d\x5a\x16\x95\x2a\x05\x31\x3a\xad\xd3\xb1\x68\x28\x52\x10\xac\xea\x34\x2c\x6c\x63\xc8\x1d\xfd\x8a\x96\x12\x05\xcd\xd5\xd0\xd4\xae\xe8\x53\xa1\xb0\xdb\xca\xba\x53\xa0\x98\x4c\x7d\x62\x48\x79\xc2\x76\x13\xdb\x55\x9d\xa8\xf5\x23\x28\xe3\xb5\x4f\x71\x82\x7e\x98\xb6\x4f\x6d\xc2\x46\x7d\xa5\x4f\x69\x42\x95\x5d\xa0\x6d\xb6\x3b\xd3\x53\x4b\x2f\x82\xd2\xab\xfd\x0a\x13\xc5\xd3\xb3\x88\xd1\x29\xea\x12\x96\xb3\x80\x5e\x59\xa2\xa1\x11\x41\xf4\x64\xb5\x26\xe7\xb6\x3e\x04\xc9\xf3\xa4\x28\x4a\xb4\xb5\x21\x28\x63\x4a\xfb\xba\xb7\x75\x21\x48\x1d\xe0\x44\x43\xdd\x91\x1e\x84\x1b\x2d\x08\xb2\x0e\x84\xa5\x64\x82\x0b\xfd\x07\x7b\xed\x07\x97\x21\x23\x6b\xcd\x87\x57\x08\x18\x39\xd1\x7b\x98\x46\xeb\x61\x48\xe7\x81\x16\x85\xe9\xd5\x78\xa8\xd4\x1a\xa8\xd1\xf9\xb6\xbe\x83\xaa\xd4\x40\xb2\xda\xd4\x76\xd0\xa8\x34\x50\x56\xc5\x65\xd7\xe9\x15\x1a\xc8\xf1\xbc\x09\xd4\x19\x26\x50\x66\x18\x50\x65\xa0\xca\x99\xf7\x29\x32\xd0\x95\xdc\xfb\xd5\x18\x1a\xc1\x33\x8a\x17\xbb\xad\xc4\xd0\x09\x9e\x91\x4e\xbc\xb4\xc2\x6d\x6a\xf0\x8c\xf2\xe1\x6e\x87\xdb\xda\x81\x33\xe2\xa2\x45\x2f\x8e\x3f\xb7\x50\xba\xa8\x95\x17\xb4\x1a\x0a\x0e\xc3\x84\x5d\xfd\x04\xf2\x1e\xc3\xb9\x76\xc2\x50\x1c\xab\x11\x95\xa2\x9d\x21\xea\x5f\xc5\x59\x25\xc4\xe9\x8d\x63\x95\x1d\x63\xbd\x80\xaf\xb4\x12\xda\xaa\x07\xa4\x48\x91\x4e\x27\x41\xa3\x78\x40\xd9\x25\x57\x21\x2c\x47\x6a\x07\x54\xe7\xbf\x85\xca\x81\x9d\xc2\x81\x95\xba\x81\x0b\x65\x03\x07\xaa\x06\x36\x8a\x06\x36\x6a\x06\x56\x4a\x06\x76\x2a\x06\x36\x0a\x06\x96\xea\x05\x0e\x94\x0b\x5c\xa8\x16\xb8\x51\x2c\x70\xa3\x56\xe0\x46\xa9\xc0\x8d\x4a\x81\x1b\x85\x02\x77\xea\x04\x56\xca\x04\xb9\xba\xa7\xbb\x08\xe2\xc7\x43\xf0\xc8\xcc\x67\x58\xd2\xe6\xb6\xb5\xa9\x5d\xe9\x1b\x61\x6c\x4e\xdd\x1b\x4a\xe8\xbe\x3a\x01\xf8\xc0\x72\x95\xf3\x32\xfd\xe6\x1c\xe2\x30\x5f\x3e\xb1\x34\x0d\x37\x47\xee\x8f\x4f\x8d\x2b\xd3\x3a\x81\x2f\x9b\x79\xfb\xf9\x02
\x4f\x2c\xc8\x0a\xef\xa9\x58\x8a\x21\x46\x98\xe8\x3c\x15\x88\x2f\xce\x1d\xc7\xd2\xad\x84\x3e\x19\x2d\x5a\x25\x8e\xb0\x27\xc5\x1d\x42\x10\xbf\xc8\x7f\x16\x51\x85\x60\x2d\x50\xb7\x18\xe7\x92\x2c\x1b\x2a\x4f\xaa\xf3\xdb\x97\xb4\x2b\x5f\x6c\x54\x1e\xca\x00\x63\x51\x20\x02\x77\xbc\x5d\x77\xe6\xe2\x14\x29\x7b\xf8\xf2\xf4\xe3\xdb\x94\x65\xf9\xdb\xa7\x1f\xdf\x96\x52\x10\xa7\x72\xe9\x7e\x56\x3c\xa2\x04\xa7\x5d\x51\x38\x03\x63\xb8\xbb\x6c\x99\xb9\x1b\x1e\xcb\xf9\x36\x65\xd9\x36\x31\x08\x5e\x9a\x87\x2a\xdb\xaf\x6e\x75\x89\x91\x7a\xad\xe1\x59\xd4\x82\x28\x8c\xf9\x9e\x21\x0d\x9e\x63\xd8\x26\x69\xf8\x2f\x3e\x4e\xf9\x0e\x5f\x86\x16\x46\x7b\x89\x3f\xa5\xf5\x36\x48\xf3\xb1\xb7\x9a\xe4\x0f\xa5\x78\x3f\x0d\x7d\x9d\x28\xb0\x8c\xe0\xb4\x33\x7e\x30\x58\x36\x6a\x9d\x44\xa6\xf9\xf1\xd1\x73\x61\xfb\x7c\x3c\xbf\x14\xae\x22\xa5\x83\xc4\x65\x8c\x9d\x92\xdd\x73\xeb\x59\x1e\xe4\x4c\xf6\x8b\xf0\x00\x09\x0f\x7e\xf5\xee\x99\x7e\x6e\x44\x2b\xca\xf8\x81\x00\xd9\xe5\xbe\x39\x80\xa2\xc1\xe8\xf4\x59\x65\xba\xac\x0f\xcb\x8b\xe5\x8d\x8a\xe5\xfe\x7c\xb3\xf8\x3c\x83\xf7\x17\x9f\x16\xe2\xef\x85\xf1\x81\xbd\xcf\x8b\x8b\x8b\xe5\x2f\x33\x58\xde\xf0\x45\x86\x38\xd5\x65\x76\x80\xcb\x7c\xdf\x76\xd2\x6d\xb0\x61\x3d\x7e\x57\x86\x3f\xe5\xb7\x6e\x6e\x75\x61\xb6\x2a\x3b\x29\xba\xc7\xf0\xc7\xb2\x0f\x0d\x7f\x7c\x63\xd4\x0b\x48\x41\x18\xdb\x17\x14\x27\xfc\x62\xf5\x92\xe2\x65\x5b\xba\x2f\x6a\xd5\x3b\xc5\x6b\xca\x60\x7d\x48\x45\xdc\x1a\xfd\xb6\x56\xcd\x99\xe4\x8d\xed\x51\x59\x99\xbf\x5f\xfe\x73\x31\x83\xf7\x8b\x8b\xe5\x2f\xee\xdf\x3a\xba\x9c\xca\x89\x6c\x99\xe9\xab\xb7\x30\x7b\x45\xa2\xe0\x9e\x45\xc7\x19\xc7\x17\xfc\x52\xf8\x41\x35\x97\x6d\xac\x46\x13\x6a\x14\x89\x67\x8d\xb9\xbd\xf8\xb0\xbb\x37\xf4\xa0\x96\x4a\x2f\x9b\xe4\x60\x3a\xf8\x5a\x3d\xf2\x4f\xde\x3a\x7c\x8f\xac\x9a\x54\x93\xda\x25\xf5\x7f\x33\x7c\x19\xb2\x6d\x72\x88\x36\x82\x74\x95\xd1\xf4\x12\x1c\x8a\x83\x3c\x7c\x62\x90\xad\x83\x88\xe1\xc2\x20\x72\x19\x3e\xb2\x68\x0e\x77\x2c\xdb\x86\x0f\xf9\xd9\x21\x35\x12\x0c\x40\x0c\x3e\xcd\xbe\x57\xb9\x14\x6e\x0d\xcd\x60\x53\xd4
\x6b\x00\xd2\xd9\x3e\x0a\x5e\x20\x28\xf4\x42\xc2\xcc\x60\x6e\x96\xeb\x67\x98\x37\xea\xc8\x7f\x83\x2c\xdc\x1d\xa2\x3c\x88\x59\x72\xc8\xa2\x17\xfe\x44\x9e\x33\x53\xee\xeb\x21\x4d\x76\x90\x3f\x27\xdc\x48\x18\x05\xe9\x49\xc4\xe2\xc7\xbc\xd8\x89\x4b\x1f\x73\x06\x7f\x62\xa7\x8f\xa7\x33\x78\x66\xec\xeb\x09\xdf\x0f\x9e\xf0\xbf\x46\x4d\xcb\xc7\x98\xfd\xf9\x54\xed\x84\xd2\x61\xbd\x4f\xb2\x90\x8f\x10\x09\x7c\x84\x42\xc2\x7a\xd4\x66\x12\x47\xed\xf0\x95\xe8\x83\x82\x53\x12\x9b\x68\x71\xbc\x2f\x79\x80\x8b\xf3\xab\x05\xec\x23\x83\x73\x38\x7c\x6c\x0c\x0f\xb7\x6f\xf3\x6f\xa1\xe1\xf6\xcc\x68\xb3\xd0\x1a\x62\xbf\x72\xf3\xa6\x55\xcc\xbf\xcb\x26\x66\x3b\xa3\xb5\xde\x67\xb4\xf9\xf2\x5f\x21\xf8\x16\x8e\x3a\x66\x30\xfb\x14\xe3\x8f\x07\xf2\xd3\x41\xfc\x70\x74\xfa\x42\x7e\x38\x8a\x49\xd2\xe4\xe6\x41\xce\x77\xd3\xdf\xd2\x2d\xbf\x0c\xa6\x1a\x72\xd4\x08\xfb\x86\x8b\xb8\xee\x12\x8e\x77\x95\xec\x89\x53\x78\xff\xc2\xbf\x0c\xc1\x21\xca\x67\x10\x08\x4f\x42\x60\xf6\x79\x94\x5f\x8e\x42\x57\xa2\xcb\x44\xc1\xed\x87\xf9\xc5\x42\x59\x7a\x19\x99\xe5\xd3\xc2\xfc\x66\x06\x17\xcb\x9f\x7f\x78\x67\xb2\x3e\x33\x5d\x9d\x9d\x74\x5b\x64\x54\x4b\x36\xc8\xec\xa7\xbc\xcd\x83\xbf\x7c\x99\x76\xa2\xfa\x3c\xcd\x44\x65\x62\x16\x31\x51\x7d\xf6\x13\xd5\x1f\x61\xa2\xfa\xec\x27\x2a\xa5\x7c\x77\x13\xd5\xff\x1f\x00\x00\xff\xff\xac\x6e\xe1\x07\xbc\x02\x0a\x00"), }, "/monitoring/group.yaml": &vfsgen۰CompressedFileInfo{ name: "group.yaml", @@ -580,9 +580,9 @@ var Assets = func() http.FileSystem { "/monitoring/metric_descriptor.yaml": &vfsgen۰CompressedFileInfo{ name: "metric_descriptor.yaml", modTime: time.Time{}, - uncompressedSize: 14121, + uncompressedSize: 14120, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\x6f\x73\x22\x37\x93\x7f\xef\x4f\xd1\xc5\x56\x2a\xb6\x0b\x06\xf0\x66\x9f\xbb\x73\x2a\x2f\xb0\x4d\x1c\x6a\xfd\xef\x0c\x4e\x2e\x4f\x2a\xf1\x88\x99\x06\x14\x6b\xa4\x89\xa4\x31\x4b\xfc\xf8\xbb\x5f\xb5\x34\x03\x68\x00\xc7\x76\xae\xae\xee\xf6\xcd\x1a\x8d\xd4\xea\x6e\xb5\x7e\xfd\x6b\x49\x1f\xe0\x54\xe5\x0b\xcd\xa7\x33\x0b\x47\x9d\xa3\x23\x38\x57\x6a\x2a\x10\x2e\x2e\x4e\x23\xe8\x09\x01\xb7\xf4\xc9\xc0\x2d\x1a\xd4\x8f\x98\x46\x7b\x1f\x60\xef\x03\x5c\xf0\x04\xa5\xc1\x14\x0a\x99\xa2\x06\x3b\x43\xe8\xe5\x2c\x99\x61\xf5\xa5\x09\x3f\xa2\x36\x5c\x49\x38\x8a\x3a\xb0\x4f\x1d\x1a\xe5\xa7\xc6\xc1\xb7\x7b\x1f\x60\xa1\x0a\xc8\xd8\x02\xa4\xb2\x50\x18\x04\x3b\xe3\x06\x26\x5c\x20\xe0\x97\x04\x73\x0b\x5c\x42\xa2\xb2\x5c\x70\x26\x13\x84\x39\xb7\x33\x37\x4d\x29\x84\xf4\xf8\xb9\x14\xa1\xc6\x96\x71\x09\x0c\x12\x95\x2f\x40\x4d\xd6\xfb\x01\xb3\x5e\x63\xfa\x37\xb3\x36\x3f\x6e\xb7\xe7\xf3\x79\xc4\x9c\xb6\x91\xd2\xd3\xb6\xf0\x3d\x4d\xfb\x62\x70\xda\xbf\x1a\xf6\x5b\x47\x51\xc7\x8f\xb9\x93\x02\x8d\x01\x8d\x7f\x14\x5c\x63\x0a\xe3\x05\xb0\x3c\x17\x3c\x61\x63\x81\x20\xd8\x1c\x94\x06\x36\xd5\x88\x29\x58\x45\x1a\xcf\x35\xb7\x5c\x4e\x9b\x60\xd4\xc4\xce\x99\xc6\xbd\x0f\x90\x72\x63\x35\x1f\x17\x36\x70\x57\xa5\x1f\x37\x41\x07\x25\x81\x49\x68\xf4\x86\x30\x18\x36\xe0\xa4\x37\x1c\x0c\x9b\x7b\x1f\xe0\xa7\xc1\xe8\x87\xeb\xbb\x11\xfc\xd4\xbb\xbd\xed\x5d\x8d\x06\xfd\x21\x5c\xdf\xc2\xe9\xf5\xd5\xd9\x60\x34\xb8\xbe\x1a\xc2\xf5\xf7\xd0\xbb\xfa\x19\x3e\x0f\xae\xce\x9a\x80\xdc\xce\x50\x03\x7e\xc9\x35\xe9\xaf\x34\x70\x72\xa4\x5f\xbd\x21\x62\xa0\xc0\x44\x79\x85\x4c\x8e\x09\x9f\xf0\x04\x04\x93\xd3\x82\x4d\x11\xa6\xea\x11\xb5\xe4\x72\x0a\x39\xea\x8c\x1b\x5a\x4e\x03\x4c\xa6\x7b\x1f\x40\xf0\x8c\x5b\x66\x5d\xcb\x86\x51\xd1\x1e\x97\x13\x75\xbc\x07\x60\xb9\x15\x78\x0c\x97\x4a\x72\xab\x34\x97\xd3\xf6\x25\x5a\xcd\x93\x33\x34\x89\xe6\xb9\x55\x7a\x0f\x20\x2d\x7f\x70\x25\x8f\x61\x34\xc3\xb5\xee\x50\xef\x0e\x1a\x8d\x2a\x74\x82\x7b\x00\x5f\x5a\x69\x22\x5a\xc6\xea\x22\xb1\x2d\xc9\x32
\x9a\x68\x53\xba\xef\x35\x63\xa6\xc5\x59\x76\x0c\x13\x26\x0c\xee\xe5\xcc\xce\x0c\x29\x38\x45\x4b\xff\x6d\x51\x62\x52\xc8\x84\x7e\x51\x74\xba\xe5\x9d\x22\x05\xe5\x44\xe9\xcc\xd9\x0d\x6c\xac\x0a\x0b\x6c\xdb\x9c\x00\x39\xd3\x2c\x43\x8b\xda\x78\xf1\x2d\xd8\xa9\x20\xfd\xab\x42\xec\x18\xac\x2e\xb0\x6c\x0c\x54\xea\xc1\xa4\x10\x02\xb8\x34\xd6\x6d\x08\x35\xd9\x3e\x35\x05\xe8\xe2\xb5\x26\xb9\xce\xff\x0f\x8c\x4a\x51\xa0\xc5\xd7\x5a\xe5\x7b\xff\x1f\x35\xa2\x27\xc4\x5b\xed\x10\xe2\x6d\x96\xe4\x5a\xfd\x8e\x89\x7d\xc9\x00\x93\xcc\x30\x63\xc7\xe5\x2f\x00\xbb\xc8\xf1\x18\x08\x86\xe4\x74\x0f\x40\x70\xf3\xea\x6d\x41\x7d\xb7\x84\x50\xc6\xe4\xe2\x7f\x57\x6d\xca\x16\x4a\xa2\xb4\x4e\xae\xef\x5a\x4e\x51\xd7\xa3\x92\x50\xc1\xd3\xf6\xa5\xf7\xc8\xc1\xd3\xa5\x6a\xa6\xfd\xf4\x54\xfe\xf9\xfc\xdc\xce\x6a\xa3\xe8\x2b\x29\xf4\xfc\x1c\x8c\x2f\x0c\x9a\x96\xb1\xcc\x62\x6b\xc6\xa5\x0d\xec\xf1\x3d\x72\xa6\x51\xda\x56\xa2\x24\xe5\x31\xd4\x75\x57\xac\x10\x2c\xd1\xc8\x2c\x6e\x11\x51\x83\xb7\xf5\x4f\x1a\x59\xda\xb2\x3c\x43\x55\xd8\x63\xe8\x04\xdf\xdc\xfe\xdf\xf5\xd1\x87\xdf\xe6\x57\xef\x74\x35\xde\xb6\x58\xe5\xef\x96\xeb\xb4\xfc\xe1\x3d\xf5\x99\xcb\x74\xd9\xf4\xc8\x44\x81\xa3\xf5\x4e\xa1\xcd\xb9\x56\x39\x6a\xcb\xd1\xac\x56\x7b\x3d\x16\x97\x8d\x1b\xb1\x5b\xfd\xf3\x46\x4c\x55\x99\x19\xce\x56\x83\xd7\x3a\xd5\x36\x72\x8a\x96\x71\x81\xe9\x7a\x7b\x45\x28\xbc\x11\x4d\x98\xcf\x78\x32\x83\x84\x49\x18\xa3\xdf\x04\x7c\x5d\x22\x40\xaa\x92\x22\x43\xe9\xb3\x63\x14\x68\xf4\x50\x8c\x51\x4b\xb4\x68\x5a\x3c\xcb\x0a\x4b\x3c\x22\x58\x4d\x20\x32\x90\x0b\xb6\xb8\x22\xa5\xdf\x6e\xe4\x6a\xf0\x6e\x23\x13\x25\x13\x6e\xd0\xed\xba\x65\xf6\xdf\x6a\x5d\xa9\x8b\x33\x91\x4c\xd5\x81\x9d\x5c\x5a\xd4\x13\x96\xa0\x89\xe0\xce\x20\x18\x94\x16\x09\x00\x13\x66\x3c\x61\x73\xa9\x44\x02\xca\xb4\x24\x11\x5c\xa5\x4d\x37\x25\x7e\x61\x59\x2e\x10\x1a\xb7\xf8\x47\x81\xc6\x06\x82\x13\x55\x48\xdb\x88\x60\xe4\x29\x21\x8a\x94\x58\x92\x72\x06\x30\x01\xe3\xc2\x02\xb7\xd4\xa4\x31\x51\x59\x86\x32\xf5\x48\x34\x26\x1d\xac\x93\xcf\xe4\x22\x10\xe9\xcd\x33\xc0\x8c\x51
\x09\x67\xc4\xb4\x1c\xa3\x24\xa3\x5a\x8f\xdc\x70\x22\x74\xe4\x18\xcc\xad\x69\x82\x29\x92\x19\x30\x03\xff\x59\x28\xcb\xde\xbc\x84\x82\x8d\x51\x98\xcd\xd5\x63\x5a\xb3\xc5\xee\xc5\xbb\x70\xc3\x76\xad\x1b\x61\x2f\x59\xa7\x26\xa5\x7c\xb0\x33\x66\x83\x38\x74\x29\x83\x86\x8c\x29\xf9\x2d\xf9\x5c\x95\x98\x02\x87\xb8\xa0\xe6\xa6\x74\x8c\x53\x30\x82\xef\x57\x2b\xd3\x74\x51\x11\xb3\x3c\x47\x39\xe5\x12\xa3\xa9\xab\x0b\x58\xce\x4d\x94\xa8\xac\x4d\x54\xba\xed\x4a\x02\xdd\xd6\x68\x72\x25\x0d\xde\x0b\x46\x01\xc0\xd1\xc4\x5b\x9c\xef\xe6\x80\x19\x33\xc0\xbc\x05\xcb\xd8\xfb\x61\x34\xba\x81\x4a\x08\x24\x2a\xc5\x26\xc4\x4b\xa1\xf4\x3b\x26\x42\x1d\xc8\xa4\xf2\x81\x6c\x17\x4a\x3d\x00\xb3\xb0\x9c\xda\x49\x35\x45\x92\xa0\x31\x93\x42\x2c\xe5\x3a\x1a\xfc\x7b\x61\x7c\x7c\x2c\x5b\x03\xa1\xce\xa3\x13\x87\x00\x6f\x5e\xf4\x25\x1d\x45\x99\xb6\x30\xcb\xed\x62\x47\x07\xca\x97\xad\x72\x3b\xe3\x7a\xe0\x73\x8b\x59\x10\x35\x5b\xa1\xb6\x16\x3a\xbe\x47\x3d\x81\x6d\x84\xd2\x76\x40\xdd\x0c\xb3\xda\xa7\x9d\xb8\x53\xd3\xe2\x25\x88\xdd\x9c\x04\x7a\x30\x2b\x32\x26\x5d\x6a\x72\xa5\xd4\x3a\xdc\x56\x51\xe1\x62\x24\xda\x32\xe7\xab\x16\x83\xfe\x3d\xe0\xe2\xef\x19\xf4\x19\x17\x2f\x1b\xf2\x35\xed\xca\x07\x5c\x94\x4a\x73\x53\x6a\x0d\x55\x7b\x46\x01\x97\x21\x5a\x67\xd2\x44\x09\xa1\xe6\xdb\x26\x06\x48\x34\xb7\xa8\x39\x3b\x86\x43\x38\x53\x68\x5c\x61\x4c\xa5\x30\xa6\xd0\xed\x74\x20\x99\x31\xcd\x12\x62\x4d\x11\x1c\xc2\x25\xb3\xc9\x0c\xcd\x5f\x4a\xd5\x38\x2d\x04\x5b\x96\x82\x4e\xe9\x98\xb5\xfe\xec\xb5\xfe\x79\x18\xc3\xa1\x67\x74\x5c\x1b\xbb\x92\xef\x75\x26\x08\x91\x50\xe4\x39\xea\xd6\x16\xb9\x4a\x83\x50\x73\xd4\x2d\x87\xf5\x02\xad\x45\x1d\x95\xf2\x34\x66\x8c\xbb\xb2\x71\xa5\xf3\x52\xa8\xef\x6a\x9a\x5b\x64\xa6\x7c\xca\x09\x7c\x95\xf6\x15\xa5\x49\x94\x46\x13\x7d\xfd\x77\x42\x60\x49\x33\xfe\x5e\x20\xfc\x58\x63\x2b\x5b\x3a\xbe\xb4\x11\x97\xc3\xfb\xb2\xc8\x5e\x11\x51\x0e\x29\xd5\x04\x52\x66\x59\x00\xf3\xcc\x18\x3e\x95\x1e\xea\x5f\xd8\x23\x00\x37\xca\xf8\x9c\xe6\x1c\x60\x8e\x61\x38\xba\x1d\x5c\x9d\x37\xe1\xe4\xfa\xfa\xa2\x09\x83\xab
\xd1\x3f\xbe\xf9\x5b\x8e\x05\x40\x59\x64\x9b\x5e\x6d\x95\x33\x6d\xf9\x40\x53\x6f\x69\x76\xba\xac\xe5\xce\x42\x26\xb3\xa1\x65\xd3\x77\xd0\x9f\x8b\xd5\xe0\x6d\x9d\x76\x2d\xd1\x72\x50\x6d\x79\xc2\x85\xb9\x2e\xf9\x87\xdf\xdc\x5e\x4f\x30\x34\x2c\x64\x88\x90\xe2\x84\x4b\xee\xc8\xdf\x72\x1d\x02\xbb\xab\x35\xb9\xe8\xdd\x5d\x9d\xfe\x70\x3f\x1c\xf5\xce\xfb\xf7\x77\x57\xc3\x9b\xfe\xe9\xe0\xfb\x41\xff\xac\x09\x77\x57\x83\xcb\x9b\x8b\xfe\x65\xff\x6a\x44\x3f\x6f\x6e\xfb\xbe\x6f\x13\xfa\xbd\xdb\x8b\x9f\xef\x7b\xa7\xa7\xfd\xe1\x30\xdc\x44\xbd\x8b\x9b\x1f\x7a\x4d\x38\xe9\x8f\x7a\x4d\x38\xef\x35\xe1\xac\x7f\x73\xdb\x3f\xed\x8d\xfa\x67\x5f\xbf\x3d\x99\xd5\x57\xb7\xb5\x53\xdb\xa0\x53\xa0\x78\xf0\x65\x69\x43\xd0\xba\x6e\x4e\xf0\xc1\x59\x13\xb4\x90\x61\x41\xc3\x79\xf8\x73\x65\xee\xc6\xda\x97\x46\xb6\x0a\x59\x65\x9c\x9a\xbd\x19\x5a\x46\x9b\x6d\x33\xe4\x36\x72\x6f\x2d\xe4\x2e\xcb\x91\xaf\x8f\xb7\x2d\x23\x82\x40\x5b\xc5\x59\xd5\x73\x4b\xc9\x61\x15\x4c\x0b\x9e\xd2\xaf\x55\x00\x6e\x21\x5d\xef\xa6\x31\x7f\xe9\xb2\xdd\x8c\x82\xcb\x29\x1a\x7b\x86\x82\x6d\xa4\xdf\x17\x30\xb7\xe6\xd7\xc1\x4a\xc8\x0b\x74\xc5\x6d\xc5\x94\x3a\x2d\xe1\x32\x57\x5c\x5a\x03\x09\x73\x7e\x1a\x2f\x4a\x75\xdc\x6e\x3c\x5b\x75\xd8\x00\x22\x25\xfc\x21\x26\x93\x3e\x91\x93\x5b\x99\x46\x98\x16\x4c\x33\x69\x71\x59\x62\x78\x79\x98\x02\x93\x29\xb0\x47\xc6\x85\xa3\x30\x56\x6d\x88\x1c\x53\x2e\x64\x69\x93\xd2\xb8\x28\x5c\x0d\xe4\x54\x14\xca\x18\x48\x0b\x1a\x03\xa8\xb5\xd2\xa6\x0e\xe1\xaf\x06\xe2\x1d\x70\xf9\x36\x5f\x6f\x87\x4d\x78\x7d\x28\xef\x86\xd0\x8d\x05\xfb\xfa\x0c\x73\x8d\x09\x55\x60\x11\x5c\x12\x29\xf0\x87\xff\xb8\x21\x3c\xf2\xb6\xdd\x9b\x2d\x7a\x81\x2b\x6a\x90\xa5\xd1\x66\xa6\x7b\x2d\xaa\x6e\x88\xdc\x85\xb2\xaf\x45\xd6\x37\x2d\xdc\xf6\xfc\xf9\x2a\x94\x85\x17\x91\x16\x76\xa2\x2d\xbc\x84\xb8\xb0\x15\x75\x61\x1b\xf2\xc2\x26\xfa\xc2\x6e\x04\x06\x30\xae\xa0\xbc\x71\xd5\xff\xfb\x83\x74\xb8\x26\xe5\xaf\x10\xc1\xcd\xb8\x3a\x72\x20\x6c\xa8\x72\xf3\x0a\x01\x7c\xb9\x5b\x1e\x0c\x6c\x04\x83\x87\x5c\x02\x80\xb9\xe6\xd6\xa2\x2c\x65\xf1\x84\x09
\xb1\x68\x42\x42\xd5\x63\x52\x58\xfe\x88\x01\xee\xd0\x08\x63\x95\xc6\xba\x92\x40\x55\xaa\x43\x16\xcb\x33\xf4\x87\x27\x8f\x4c\xbc\x06\x1c\xca\xad\xb1\x45\x49\x6e\x67\xc0\x60\xc6\xa7\x33\xd4\x30\xd5\x4c\x12\xd9\xe7\x76\x01\x33\xf6\xe8\xce\x00\x32\x26\x04\xea\xba\x43\xde\x05\x37\xab\x43\xbc\xb7\x13\xb3\xcb\xfa\x01\x20\xbc\x0a\x5c\xca\x31\x2f\xd2\xb2\x9f\x66\xe8\x2e\xbb\xd6\x08\x98\xc6\x44\xe9\xd4\x94\x07\x1f\x96\x49\x54\x85\x29\xf1\xa1\x49\x45\x09\x21\x78\x1d\xb0\x99\xef\xd0\x04\xb4\x49\x04\x43\x95\x21\x24\x2a\x1b\x73\x59\x5e\x72\xa9\x09\xc4\x5e\xfe\xfd\x03\x97\x69\xec\xe0\x3f\x76\x63\xee\xc9\x80\x18\x32\x3e\x9d\x85\x55\x3a\x95\x70\x63\x04\x53\xe4\xb9\xd2\x0e\xf1\x36\xd0\xea\xb2\x3f\xba\x1d\x9c\xde\x7f\x1e\x5c\x9d\x85\x60\x75\xde\xbb\x3b\xef\x13\xd4\x5c\x8c\x7a\x21\x56\x9d\xde\x5d\xde\x5d\xf4\x46\x83\x1f\xfb\xff\x23\xbc\x6e\x87\x06\x35\xaa\x75\x77\xde\xaf\xb1\xad\x8b\x1a\x1d\x5b\xa9\xb5\x0a\x19\x7f\x8f\x87\xe9\x6d\x79\x69\x47\x35\xd0\xdb\x0f\xc6\x2e\xb7\x8a\x59\xeb\x4f\x59\xf6\x5a\x8a\xcd\x13\x97\x20\x54\x6e\x91\xa5\x2d\x25\xc5\x22\x82\xc1\x04\xa8\x1e\x46\x69\xdd\x39\x97\x04\xe6\x77\xa5\x41\xcd\x29\x46\xfc\xee\xe7\x06\x78\x8a\xd2\xf2\x09\xaf\x6d\xe7\x9c\x69\xcb\x09\x08\xdc\x95\x70\x70\xc0\xe5\x58\xc1\xa6\xca\xab\xb8\x6e\xfa\x8a\x8e\x87\xdb\xb9\x7e\x30\x59\x3f\x9c\x73\xec\x8f\x94\x2f\xcb\xc0\xa0\xb3\x92\xdb\x29\x60\xa5\xc4\xf2\xd2\xd4\x89\x32\xee\xca\x06\x53\x98\xa1\xc6\x77\x13\xc4\xb5\x63\x2c\xfa\xf3\x35\xe7\x58\x5b\x60\xbe\x86\x01\xb5\x1e\xe5\x95\xc0\xdb\xf1\xe6\x26\xb8\x4b\xd8\x88\x04\xca\x11\xa5\xec\xe5\x71\xd3\xda\xbd\xf2\xbb\xfc\xa1\x71\x82\x1a\x65\x12\xc6\x77\x6b\x29\xf7\x18\x4e\x85\x2a\xd2\xea\x67\xc6\x24\x9b\xa2\x6e\x6f\x6a\x0a\xfe\xc8\xfb\xd8\x9d\xcf\xd7\xa3\x0e\x6b\x17\x48\x00\x06\xc5\xe4\x82\xcb\x87\xb7\x3b\x69\x58\x8e\x7c\xf3\x46\x1a\xad\xb9\xcb\xdf\x22\xd4\x0b\xe0\x25\x87\x7b\xab\x37\x6d\xed\xa8\xe6\x75\x96\xd4\x4e\x66\x36\x0f\x54\xd6\x36\x52\x13\xb8\xac\x12\x2e\xb7\x06\xce\xae\x86\xde\x86\x5c\xe3\x84\x7f\xf1\xd5\xbd\xdb\x71\xb5\x1d\x4a\x60\x7e\x77\x7b\xd1
\x42\x99\xa8\x94\xb0\xbc\x27\x84\xbf\x3f\x70\x05\x3f\xa6\xeb\xb3\x18\x9f\x81\xc9\x29\xcb\x09\xe2\xa4\x30\x56\x65\xb5\x93\xf4\xf0\xa8\x5c\x69\x88\xf1\x8b\x45\x4d\x05\x60\xad\x63\xc5\x03\xca\x09\xcc\x4c\x15\x22\x75\xf4\x99\x81\x64\xb6\xd0\x4c\xc0\x8c\xa3\x66\x3a\x99\x11\x4d\x09\x04\x4f\xb5\x2a\x72\x2e\xa7\xc1\x21\xff\x31\x34\xb6\xea\xd4\xe6\xf2\x51\xf1\x04\xdb\x39\xe3\x69\x9b\x65\xee\x3e\x06\x1a\x3b\x14\x6b\xe7\x5a\x65\x94\x84\x0b\xd3\x2e\xf2\x46\x30\x6d\xe3\x9d\x17\x08\x8d\x77\xa4\x36\x1f\x14\x13\xa5\xe7\x4c\xa7\x2d\x23\x98\x99\xb5\x98\x10\x6a\x5e\xbb\x44\x06\x28\x24\x7f\x07\xa8\xdc\x49\xbe\x13\x51\x5c\x94\x91\x58\xe3\x5e\x01\xb9\xf4\xb1\xb6\x23\x5c\xb6\xf7\x37\x56\x15\x13\x18\xb8\x1b\x2c\x42\xf4\x30\x0f\xac\xde\x17\x71\xbf\xa9\x02\x8e\xc1\x0d\xc4\xee\xc0\x2c\x6e\x42\x7c\x76\x7d\x77\x72\xd1\x8f\xdd\x89\x69\x7c\x36\x18\x8e\x6e\x07\x27\x77\xa3\xc1\xf5\x55\x1c\x12\x3c\xd2\x2d\x26\xe5\x62\x7f\x36\x55\x9e\x1b\x6b\x2c\x73\x20\x5b\xbf\xe6\xf4\xe4\x35\x50\xdc\x44\x70\xc6\x27\x0e\xde\x42\xa0\x32\x0b\x43\x58\xef\x99\x0f\x98\x84\x09\x1f\xf2\x7e\x54\x59\x2f\x67\x4a\x23\x20\x33\x5c\x2c\xd6\x2e\x14\xf7\x8d\xaa\x28\x57\xb8\x01\x26\x10\x77\xa2\xce\xd1\xc3\xc9\x22\x86\x7b\x27\xf8\x3e\xbc\x8a\x64\x06\xe2\xa3\xce\xc9\x22\x6e\x96\xb9\xd6\x3b\x97\x06\x7e\xfc\x74\xf4\x91\x06\x06\x12\x77\x0a\xf9\x18\x7d\xba\x3c\x59\xc4\x07\x11\xfc\xa0\xe6\xf8\x88\xba\xb9\x74\xb9\x77\x16\x39\xfb\xc1\x4d\x44\x1c\x21\x4c\x68\x95\x95\x35\xe8\xe3\x06\x98\x98\xb3\x85\x0b\x03\x3b\x53\x85\x61\x32\x75\xe4\x71\xbc\xb0\xc4\x2c\xa4\x82\x8c\x59\x8b\x1a\x66\x6a\x1e\x1e\x9b\xd8\xd2\x8f\xeb\x8a\x3a\xaa\xb2\x50\x05\xcc\x99\xb4\xc0\xc0\x6f\xd7\x25\xd2\xa8\x92\xeb\x3a\x15\xf0\x0b\xab\x25\x12\x59\x64\x63\xd4\x34\xfb\xe9\xcd\x5d\xcb\x60\xa2\x48\x99\xea\x60\x84\xc1\xef\x6a\xdc\x5c\xde\xa4\xf9\xb7\x05\xc0\x64\x19\x61\x6b\x84\x6e\xeb\x75\xde\x7c\xa6\x4c\xe0\x2a\xf3\x74\x7a\x73\xf7\x1c\xc3\x3e\x01\xcc\x1f\x05\x7f\x64\x02\xa5\x15\x0b\x88\xbb\xd5\xa7\xea\x0a\x2e\x36\xf1\x41\x18\xa3\x03\xef\xc6\xdf\xd5\x98\xf4\x33\xd0\x3d\x6a\x76\x3a
\x9f\xd6\xf5\x2e\x99\xda\xca\xf3\xdc\x2c\x6b\x33\x5a\xce\xee\x51\xa7\xf3\xa9\x16\xf9\x3d\xe1\x10\x8b\xaa\x34\x2a\xda\xf8\xab\x7c\xe9\xca\x30\xf7\x9e\xd0\x85\x6e\x55\x56\x05\x82\xe7\x6c\xb1\xe9\xb9\x6a\x47\xae\x7b\x6e\xa7\xb7\x1e\x4a\x9f\x84\xdc\x9e\x22\xda\x99\x49\x96\xad\x6d\x26\x32\x2f\x22\xfb\x60\x7f\xc9\x4d\xbd\xc5\xed\x6e\xa7\xd3\x89\x0f\xfc\xb5\x09\x4d\xf1\x99\x97\x92\x37\x04\x7b\x99\x71\xb7\x1b\xfd\xdb\xd1\xc7\xad\x92\x8e\xbe\xa1\xed\xe0\x4a\xe8\xaa\x66\x29\x51\x8d\xea\x5a\x16\x6e\xfe\x62\x5c\xde\x47\xff\x42\x03\xee\xa4\xa3\xc9\x70\xaa\x52\xff\xaa\xe0\xce\x8d\x53\x13\xb8\x44\x66\x0a\x8d\xbf\xee\x13\xe4\x9b\xe3\x76\xdb\x49\x54\x93\xcc\xb7\xbb\x37\x98\x45\x52\x64\xd1\xcc\x66\xe2\x20\x9c\xc3\x32\x99\x32\x9d\x1e\xc3\xe1\xe1\x09\x33\x3c\x29\xb5\xd9\xbf\xbb\x1a\x8c\x0e\x0e\x0f\xe1\x10\xe2\x31\x79\x74\xcc\x2d\xfd\x4d\xa0\x41\x1b\x8d\xfe\x36\x31\xf8\xc8\x09\x24\x1e\x42\x9c\x71\x49\xc5\x9a\x2c\x7c\xbf\x59\x0c\x33\x55\x68\xfa\x33\x8d\x21\x65\x0b\xfa\xab\x1b\x43\xca\x33\x94\x86\x2b\xff\x06\xf4\xf0\xf0\xc6\xf1\x03\x34\xb0\x7f\x73\xdb\xff\x7e\xf0\x5f\x07\x87\x87\x75\xc9\x0f\x31\x3c\x70\xa1\x60\xbf\xdb\xf9\xed\xe3\x01\xb5\x5c\x52\x00\x4c\x99\x6b\xf9\x87\x6b\x39\x8f\x61\xca\xcb\x96\xff\x70\x2d\xa3\x18\x2c\x6a\xdf\xd2\x3d\x3a\xa8\x0b\xbd\x89\x21\x47\x5b\x7e\xfe\xe4\x46\xf4\x63\xda\xec\xbe\xe5\xdf\x5d\xcb\x3f\x63\xf8\x13\x6d\xd9\xeb\xa8\xeb\xda\x7e\x8e\x61\xa1\x96\x6d\xdf\x6c\x08\xce\xc8\x0b\x42\x70\xf7\xbd\xe5\xf5\x2d\xa8\x2d\xd1\xde\x84\x96\xd7\x58\xc6\x20\x99\x2c\x9b\xbc\xca\x79\x0c\x39\x4f\xc2\x02\xda\x7d\xee\x1e\xb9\xef\x93\x18\x26\x98\xd9\x72\x4c\xa9\x35\x8b\x81\xd9\x65\x9b\xd7\xfb\x4f\xd2\x3b\xaf\x1a\x8f\xba\x1b\x4a\x2e\xc8\x88\x64\xd9\xe1\x1b\x37\xea\x33\x27\x47\x8f\x39\xec\x1f\xfd\xd6\xed\x78\x47\x73\xf2\xb4\x6f\x3a\xf2\x4d\xe7\x9c\x5c\x3d\xe6\xa1\x96\x47\xbf\x7d\xf4\x9f\x47\x9c\xfc\xee\x47\x7c\xe3\x9b\x6e\x38\xf9\xda\x37\x7d\xa2\xa6\xc3\x73\xcd\xb2\x8c\xe9\x43\x7f\xb3\x39\xf5\xbf\x80\x89\xda\x6b\x04\x4f\x29\x7d\x5e\x75\x4f\x18\xa4\xc4\xc4\x2a\x6d\x8e\x49\x6c\x9b\x82
\xe9\x91\xbb\xd7\xd0\x4a\x83\xa6\x7c\x0b\xfb\xcc\x10\xd6\x72\x39\xe1\x5f\x40\xe5\xa8\x99\x55\xba\x06\x8a\x6b\x54\xcd\x34\x5d\x2e\x6a\x3f\x61\xc6\xb8\xf0\x50\x1a\x5f\xf2\x93\x45\xbb\xdb\xc9\x4c\x0c\xfb\x4c\x50\xba\x99\xce\x1c\x2a\x95\xdc\x90\x89\x4c\xd5\xde\xd6\x48\x4a\x74\x9e\x9c\xc6\x6d\x13\x97\x38\xe7\x31\xca\xa3\xd3\xb7\xa4\xe0\x8a\x5f\x96\xe9\x6c\xec\x8e\x43\x72\xf7\x46\x99\x85\x22\xff\x28\x50\x2f\x7c\xfd\x3c\xd1\x2a\x73\xa0\xe5\x2e\x70\xc5\xc2\x5d\x02\x17\x59\x21\x1c\x04\x93\xce\x29\x0a\x5b\xa6\xec\x83\x88\x9c\x13\xc5\x90\x15\xc2\x72\x47\x7b\x36\x9e\x0e\x28\xed\xa6\x55\xc6\xdd\xa7\xed\x72\x5a\xdd\x51\xe7\x27\x8b\x28\xf5\x2e\x7a\x78\x9a\x33\x6b\x9f\xa3\xd9\x16\x4e\x54\x2d\xa7\x7b\x30\xe4\x90\xc5\xe5\x6f\x53\xde\xa8\x9b\x63\xe8\x2f\xaf\xcd\xe1\x3b\x38\xad\xde\x17\x1e\xc3\x13\x34\xa2\xc6\xaa\x21\x90\xfc\x4c\x5f\xdb\x6b\x5f\xe1\x19\xbe\x5d\xfb\xf5\x1d\xec\xc3\x2f\xe0\x31\x04\x7e\x05\x42\x32\xf8\x17\x34\xbe\x6a\xc0\x01\xfc\x02\x3d\x29\x95\xdd\xf4\xc3\xaf\xf0\xaf\xb5\x4f\xd4\xbf\xdb\x80\x6f\xd7\x9b\xbe\x83\xc6\x53\x03\xae\x7a\x97\x7d\x68\x3c\xd3\xb7\x2b\x65\xd1\xc7\xdf\xaa\x57\x98\x13\xb8\xf1\xd9\x98\x81\x7f\x46\x65\x29\x41\x72\x5b\x99\x4f\x09\x8d\xb4\x8b\xa3\x2a\x3b\xb3\xd5\x74\xbc\xe4\x11\x4c\x28\x89\xcd\x3a\x37\x92\x65\x10\x78\x8f\xae\xc8\x00\x65\xd9\xb8\x1b\x6f\x2c\xd8\x93\xf6\xcf\xc0\x9e\xdb\x06\xbe\xfb\x0e\xba\x6d\x53\xcb\x8b\xf1\xc9\xe2\xc9\x6a\x26\x4d\x46\x09\x3f\x2d\xfb\x9d\x2c\xda\x26\x76\x51\x44\x76\xbb\xbc\xca\xc0\x90\xa8\xf2\x1d\xae\x54\xb2\x35\x16\x4c\x3e\x40\xae\xb9\x74\x65\x43\xc8\x0f\x86\xa7\x83\xc1\xfa\x33\x05\x2a\xf2\xca\x77\x98\x14\xbb\xf1\x93\x8f\xa2\x67\x3f\x4b\x37\x5e\x91\x66\x53\x86\x0c\xd3\x21\x81\xff\x25\xcc\x1b\xd4\x65\x95\xf9\x50\x46\x73\xfe\xc0\x73\x4c\x39\x73\x79\x8f\x7e\xb5\xcf\xd6\x47\xdc\xff\x51\x30\x69\xb9\x5d\x1c\xd4\x79\x71\x77\xf5\x2e\x8d\x4b\x88\xbb\xce\x76\x5f\x46\xd8\x45\xee\x0f\xa6\xfd\xa2\xcc\x69\x05\xe4\xea\x54\x08\xc6\xab\xdc\x19\xb2\x02\xca\xea\x79\xae\x55\xae\x39\xb3\xf5\x17\x60\x0d\x89\x73\x57\xe1\x1a\xc8\x51\x53\x6a\x6c
\x54\x97\x90\x4b\x37\x38\x2e\x1d\x2e\x55\xb7\x5d\xee\xbd\x27\x89\xf3\x96\x1b\xff\x4c\x4d\xfb\x9e\xae\x07\x25\x51\xfc\x29\x86\xb9\xc3\x99\x0c\x99\x84\xc6\x27\x58\xce\x79\xf0\x22\x95\x6b\x04\xe4\x3a\x67\x53\x84\x47\x8e\xf3\x35\x4d\xbd\xd8\x50\xd7\xda\x91\x1b\xc4\x44\x9e\x2a\x75\x1f\x96\x8a\x3f\x3c\x91\xc0\x7b\x27\x70\xa7\xea\x54\x70\x7c\x8a\x3e\x86\x5b\x2a\xb0\xe5\x63\xa7\xb3\x4d\x33\x8f\x7b\x5f\x05\xb1\x14\x04\x4d\x20\x71\x39\x59\x97\x98\x5e\x59\xf3\xf8\x7d\x88\xd5\xa1\x36\x4c\xf9\x23\x85\x2b\xa3\x39\x12\xaa\xe8\xa6\xe8\x2a\xac\x55\x65\xb2\x29\xd3\x73\xba\x55\xe4\x70\xbf\x69\x35\x93\xfe\x42\xb9\x13\x45\xab\x09\x6b\xab\xf6\x31\x76\x36\x86\x9a\x36\x3e\x56\xd3\x97\x26\x12\xed\x88\xbe\xa2\x1c\x93\x12\xb6\xd3\x8c\x95\x9c\x72\x8f\x51\x8b\x4b\x87\xcd\x95\x22\xb5\xc4\xba\xa6\x14\x69\x54\x9e\x9e\xce\xb9\x10\xae\xb8\x2c\x53\x87\xaf\x68\xba\x9d\x8e\x53\x37\xa8\xf3\xd6\x9d\x12\x52\x01\x57\x82\x86\x86\x75\xa2\x4e\x65\x5b\x68\xcf\x9b\xcf\x23\xb6\xbe\x3c\x7a\xdd\x41\xc3\xb6\xd7\x46\x7f\x71\x59\xb2\xeb\x85\xd1\x4b\x77\x25\x8e\x7d\x7b\xd8\x2f\xb3\xaa\xc5\x29\x55\xc1\x0c\x26\x42\x31\xcb\xe5\xb4\xe5\x6e\xb5\xca\x3a\x32\x44\xe4\xbf\x77\x4d\x52\x5e\x8d\xd4\x0a\x8a\xdd\xd7\x24\x7f\xf1\x7c\xe9\xdd\x97\x1f\x1b\x8f\x95\x36\x9e\x29\x55\x0f\x94\xfe\x3b\x00\x00\xff\xff\xf1\x5b\x5f\x7c\x29\x37\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\x5f\x53\x2b\xb7\x92\x7f\xe7\x53\x74\xf9\x54\x2a\x40\xd9\x63\x9b\x93\x73\x77\x97\x54\x1e\x0c\x38\xc4\x75\xf8\xb7\xd8\x24\x9b\x9b\x4a\x18\x79\xa6\x6d\x2b\x68\xa4\x89\xa4\xc1\xc7\xe1\xf2\xdd\xb7\x5a\x9a\xb1\xad\xb1\x4d\x80\x6c\x6d\xed\xf2\x82\x47\x7f\x5a\xdd\xad\xd6\xaf\xbb\xd5\xfa\x00\xa7\x2a\x5f\x68\x3e\x9d\x59\x38\xea\x1c\x1d\xc1\xb9\x52\x53\x81\x70\x71\x71\x1a\x41\x4f\x08\xb8\xa5\x2e\x03\xb7\x68\x50\x3f\x62\x1a\xed\x7d\x80\xbd\x0f\x70\xc1\x13\x94\x06\x53\x28\x64\x8a\x1a\xec\x0c\xa1\x97\xb3\x64\x86\x55\x4f\x13\x7e\x44\x6d\xb8\x92\x70\x14\x75\x60\x9f\x06\x34\xca\xae\xc6\xc1\xb7\x7b\x1f\x60\xa1\x0a\xc8\xd8\x02\xa4\xb2\x50\x18\x04\x3b\xe3\x06\x26\x5c\x20\xe0\x97\x04\x73\x0b\x5c\x42\xa2\xb2\x5c\x70\x26\x13\x84\x39\xb7\x33\xb7\x4c\x49\x84\xf8\xf8\xb9\x24\xa1\xc6\x96\x71\x09\x0c\x12\x95\x2f\x40\x4d\xd6\xc7\x01\xb3\x9e\x63\xfa\x9b\x59\x9b\x1f\xb7\xdb\xf3\xf9\x3c\x62\x8e\xdb\x48\xe9\x69\x5b\xf8\x91\xa6\x7d\x31\x38\xed\x5f\x0d\xfb\xad\xa3\xa8\xe3\xe7\xdc\x49\x81\xc6\x80\xc6\x3f\x0a\xae\x31\x85\xf1\x02\x58\x9e\x0b\x9e\xb0\xb1\x40\x10\x6c\x0e\x4a\x03\x9b\x6a\xc4\x14\xac\x22\x8e\xe7\x9a\x5b\x2e\xa7\x4d\x30\x6a\x62\xe7\x4c\xe3\xde\x07\x48\xb9\xb1\x9a\x8f\x0b\x1b\xa8\xab\xe2\x8f\x9b\x60\x80\x92\xc0\x24\x34\x7a\x43\x18\x0c\x1b\x70\xd2\x1b\x0e\x86\xcd\xbd\x0f\xf0\xd3\x60\xf4\xc3\xf5\xdd\x08\x7e\xea\xdd\xde\xf6\xae\x46\x83\xfe\x10\xae\x6f\xe1\xf4\xfa\xea\x6c\x30\x1a\x5c\x5f\x0d\xe1\xfa\x7b\xe8\x5d\xfd\x0c\x9f\x07\x57\x67\x4d\x40\x6e\x67\xa8\x01\xbf\xe4\x9a\xf8\x57\x1a\x38\x29\xd2\xef\xde\x10\x31\x60\x60\xa2\x3c\x43\x26\xc7\x84\x4f\x78\x02\x82\xc9\x69\xc1\xa6\x08\x53\xf5\x88\x5a\x72\x39\x85\x1c\x75\xc6\x0d\x6d\xa7\x01\x26\xd3\xbd\x0f\x20\x78\xc6\x2d\xb3\xae\x65\x43\xa8\x68\x8f\xcb\x89\x3a\xde\x03\xb0\xdc\x0a\x3c\x86\x4b\x25\xb9\x55\x9a\xcb\x69\xfb\x12\xad\xe6\xc9\x19\x9a\x44\xf3\xdc\x2a\xbd\x07\x90\x96\x1f\x5c\xc9\x63\x18\xcd\x70\x6d\x38\xd4\x87\x83\x46\xa3\x0a\x9d\xe0\x1e\xc0\x97\x56\x9a\x88\x96\xb1\xba\x48\x6c\x4b\xb2\x8c\x16
\xda\xa4\xee\x47\xcd\x98\x69\x71\x96\x1d\xc3\x84\x09\x83\x7b\x39\xb3\x33\x43\x0c\x4e\xd1\xd2\xbf\x2d\x4c\x4c\x0a\x99\xd0\x17\x59\xa7\xdb\xde\x29\x92\x51\x4e\x94\xce\x9c\xdc\xc0\xc6\xaa\xb0\xc0\xb6\xad\x09\x90\x33\xcd\x32\xb4\xa8\x8d\x27\xdf\x82\x9d\x0c\xd2\x5f\x65\x62\xc7\x60\x75\x81\x65\x63\xc0\x52\x0f\x26\x85\x10\xc0\xa5\xb1\xee\x40\xa8\xc9\xf6\xa5\xc9\x40\x17\xaf\x15\xc9\x0d\xfe\x7f\x20\x54\x8a\x02\x2d\xbe\x56\x2a\x3f\xfa\xff\xa8\x10\x3d\x21\xde\x2a\x87\x10\x6f\x93\x24\xd7\xea\x77\x4c\xec\x4b\x02\x98\x64\x86\x19\x3b\x2e\xbf\x00\xec\x22\xc7\x63\x20\x18\x92\xd3\x3d\x00\xc1\xcd\xab\x8f\x05\x8d\xdd\x62\x42\x19\x93\x8b\xff\x5d\xb6\xc9\x5b\x28\x89\xd2\x3a\xba\x7e\x68\xb9\x44\x9d\x8f\x8a\x42\x05\x4f\xdb\xb7\xde\x23\x07\x4f\x97\xac\x99\xf6\xd3\x53\xf9\xf3\xf9\xb9\x9d\xd5\x66\x51\x2f\x31\xf4\xfc\x1c\xcc\x2f\x0c\x9a\x96\xb1\xcc\x62\x6b\xc6\xa5\x0d\xe4\xf1\x23\x72\xa6\x51\xda\x56\xa2\x24\xf9\x31\xd4\x75\x55\xac\x10\x2c\xd1\xc8\x2c\x6e\x21\x51\x83\xb7\xf5\x2e\x8d\x2c\x6d\x59\x9e\xa1\x2a\xec\x31\x74\x82\x3e\x77\xfe\x77\x75\x7a\xf3\xdb\xec\xf5\x4a\x57\xe3\x6d\x9b\x55\x7e\xb7\xdc\xa0\xe5\x87\xd7\xd4\x67\x2e\xd3\x65\xd3\x23\x13\x05\x8e\xd6\x07\x85\x32\xe7\x5a\xe5\xa8\x2d\x47\xb3\xda\xed\x75\x5b\x5c\x36\x6e\xd8\x6e\xf5\xe7\x85\x98\xaa\xd2\x33\x9c\xad\x26\xaf\x0d\xaa\x1d\xe4\x14\x2d\xe3\x02\xd3\xf5\xf6\x2a\xa0\xf0\x42\x34\x61\x3e\xe3\xc9\x0c\x12\x26\x61\x8c\xfe\x10\xf0\x75\x8a\x00\xa9\x4a\x8a\x0c\xa5\xf7\x8e\x51\xc0\xd1\x43\x31\x46\x2d\xd1\xa2\x69\xf1\x2c\x2b\x2c\xc5\x11\xc1\x6e\x02\x05\x03\xb9\x60\x8b\x2b\x62\xfa\xed\x42\xae\x26\xef\x16\x32\x51\x32\xe1\x06\xdd\xa9\x5b\x7a\xff\xad\xd2\x95\xbc\x38\x11\x49\x54\x1d\xc8\xc9\xa5\x45\x3d\x61\x09\x9a\x08\xee\x0c\x82\x41\x69\x91\x00\x30\x61\xc6\x07\x6c\xce\x95\x48\x40\x99\x96\x41\x04\x57\x69\xd3\x2d\x89\x5f\x58\x96\x0b\x84\xc6\x2d\xfe\x51\xa0\xb1\x01\xe1\x44\x15\xd2\x36\x22\x18\xf9\x90\x10\x45\x4a\x51\x92\x72\x02\x30\x01\xe3\xc2\x02\xb7\xd4\xa4\x31\x51\x59\x86\x32\xf5\x48\x34\x26\x1e\xac\xa3\xcf\xe4\x22\x20\xe9\xc5\x33\xc0\x8c\x51\x09\x67
\x14\x69\xb9\x88\x92\x84\x6a\x3d\x72\xc3\x29\xa0\x23\xc5\x60\x6e\x4d\x13\x4c\x91\xcc\x80\x19\xf8\xcf\x42\x59\xf6\xe6\x2d\x14\x6c\x8c\xc2\x6c\xee\x1e\xd3\x9a\x2d\x76\x6f\xde\x85\x9b\xb6\x6b\xdf\x08\x7b\x49\x3a\x35\x29\xe9\x83\x9d\x31\x1b\xd8\xa1\x73\x19\x34\x65\x4c\xce\x6f\x19\xcf\x55\x8e\x29\x50\x88\x33\x6a\x6e\x4a\xc5\x38\x06\x23\xf8\x7e\xb5\x33\x4d\x67\x15\x31\xcb\x73\x94\x53\x2e\x31\x9a\xba\xbc\x80\xe5\xdc\x44\x89\xca\xda\x14\x4a\xb7\x5d\x4a\xa0\xdb\x1a\x4d\xae\xa4\xc1\x7b\xc1\xc8\x00\x38\x9a\x78\x8b\xf2\xdd\x1a\x30\x63\x06\x98\x97\x60\x69\x7b\x3f\x8c\x46\x37\x50\x11\x81\x44\xa5\xd8\x84\x78\x49\x94\xbe\x63\x0a\xa8\x03\x9a\x94\x3e\x90\xec\x42\xa9\x07\x60\x16\x96\x4b\x3b\xaa\xa6\x48\x12\x34\x66\x52\x88\x25\x5d\x17\x06\xff\x5e\x18\x6f\x1f\xcb\xd6\x80\xa8\xd3\xe8\xc4\x21\xc0\x9b\x37\x7d\x19\x8e\xa2\x4c\x5b\x98\xe5\x76\xb1\x63\x00\xf9\xcb\x56\x79\x9c\x71\xdd\xf0\xb9\xc5\x2c\xb0\x9a\xad\x50\x5b\x33\x1d\x3f\xa2\xee\xc0\x36\x4c\x69\x3b\xa0\x6e\x9a\x59\xad\x6b\x27\xee\xd4\xb8\x78\x09\x62\x37\x17\x81\x1e\xcc\x8a\x8c\x49\xe7\x9a\x5c\x2a\xb5\x0e\xb7\x95\x55\x38\x1b\x89\xb6\xac\xf9\xaa\xcd\xa0\xbf\x07\x5c\xfc\x3d\x81\x3e\xe3\xe2\x65\x41\xbe\xa6\x53\xf9\x80\x8b\x92\x69\x6e\x4a\xae\xa1\x6a\xcf\xc8\xe0\x32\x44\xeb\x44\x9a\x28\x21\xd4\x7c\xdb\xc2\x00\x89\xe6\x16\x35\x67\xc7\x70\x08\x67\x0a\x8d\x4b\x8c\x29\x15\xc6\x14\xba\x9d\x0e\x24\x33\xa6\x59\x42\x51\x53\x04\x87\x70\xc9\x6c\x32\x43\xf3\x97\x54\x35\x4e\x0b\xc1\x96\xa9\xa0\x63\x3a\x66\xad\x3f\x7b\xad\x7f\x1e\xc6\x70\xe8\x23\x3a\xae\x8d\x5d\xd1\xf7\x3c\x13\x84\x48\x28\xf2\x1c\x75\x6b\x0b\x5d\xa5\x41\xa8\x39\xea\x96\xc3\x7a\x81\xd6\xa2\x8e\x4a\x7a\x1a\x33\xc6\x5d\xda\xb8\xe2\x79\x49\xd4\x0f\x35\xcd\x2d\x34\x53\x3e\xe5\x04\xbe\x4a\xfb\x8c\xd2\x24\x4a\xa3\x89\xbe\xfe\x3b\x26\xb0\x0c\x33\xfe\x9e\x21\xfc\x58\x8b\x56\xb6\x0c\x7c\xe9\x20\x2e\xa7\xf7\x65\x91\xbd\xc2\xa2\x1c\x52\xaa\x09\xa4\xcc\xb2\x00\xe6\x99\x31\x7c\x2a\x3d\xd4\xbf\x70\x46\x00\x6e\x94\xf1\x3e\xcd\x29\xc0\x1c\xc3\x70\x74\x3b\xb8\x3a\x6f\xc2\xc9\xf5\xf5\x45\x13\x06\x57\xa3
\x7f\x7c\xf3\xb7\x14\x0b\x80\xb2\xc8\x36\xb5\xda\x2a\x57\xda\xd2\x41\x4b\x6f\x69\x76\xbc\xac\xf9\xce\x42\x26\xb3\xa1\x65\xd3\x77\x84\x3f\x17\xab\xc9\xdb\x06\xed\xda\xa2\xe5\xa4\xda\xf6\x84\x1b\x73\x5d\xc6\x1f\xfe\x70\x7b\x3e\xc1\xd0\xb4\x30\x42\x84\x14\x27\x5c\x72\x17\xfc\x2d\xf7\x21\x90\xbb\xda\x93\x8b\xde\xdd\xd5\xe9\x0f\xf7\xc3\x51\xef\xbc\x7f\x7f\x77\x35\xbc\xe9\x9f\x0e\xbe\x1f\xf4\xcf\x9a\x70\x77\x35\xb8\xbc\xb9\xe8\x5f\xf6\xaf\x46\xf4\x79\x73\xdb\xf7\x63\x9b\xd0\xef\xdd\x5e\xfc\x7c\xdf\x3b\x3d\xed\x0f\x87\xe1\x21\xea\x5d\xdc\xfc\xd0\x6b\xc2\x49\x7f\xd4\x6b\xc2\x79\xaf\x09\x67\xfd\x9b\xdb\xfe\x69\x6f\xd4\x3f\xfb\xfa\xed\xce\xac\xbe\xbb\xad\x9d\xdc\x06\x83\x02\xc6\x83\x9e\xa5\x0c\x41\xeb\xba\x38\x41\x87\x93\x26\x68\x21\xc1\x82\x86\xf3\xf0\x73\x25\xee\xc6\xde\x97\x42\xb6\x0a\x59\x79\x9c\x9a\xbc\x19\x5a\x46\x87\x6d\xd3\xe4\x36\x7c\x6f\xcd\xe4\x2e\xcb\x99\xaf\xb7\xb7\x2d\x33\x02\x43\x5b\xd9\x59\x35\x72\x4b\xca\x61\x15\x4c\x0b\x9e\xd2\xd7\xca\x00\xb7\x04\x5d\xef\x0e\x63\xfe\x52\x65\xbb\x23\x0a\x2e\xa7\x68\xec\x19\x0a\xb6\xe1\x7e\x5f\xc0\xdc\x9a\x5e\x07\x2b\x22\x2f\x84\x2b\xee\x28\xa6\x34\x68\x09\x97\xb9\xe2\xd2\x1a\x48\x98\xd3\xd3\x78\x51\xb2\xe3\x4e\xe3\xd9\x6a\xc0\x06\x10\x29\xe1\x2f\x31\x99\xf4\x8e\x9c\xd4\xca\x34\xc2\xb4\x60\x9a\x49\x8b\xcb\x14\xc3\xd3\xc3\x14\x98\x4c\x81\x3d\x32\x2e\x5c\x08\x63\xd5\x06\xc9\x31\xf9\x42\x96\x36\xc9\x8d\x8b\xc2\xe5\x40\x8e\x45\xa1\x8c\x81\xb4\xa0\x39\x80\x5a\x2b\x6d\xea\x10\xfe\x6a\x20\xde\x01\x97\x6f\xd3\xf5\x76\xd8\x84\xd7\x9b\xf2\x6e\x08\xdd\xd8\xb0\xaf\xcf\x30\xd7\x98\x50\x06\x16\xc1\x25\x05\x05\xfe\xf2\x1f\x37\x88\x47\x5e\xb6\x7b\xb3\x85\x2f\x70\x49\x0d\xb2\x34\xda\xf4\x74\xaf\x45\xd5\x0d\x92\xbb\x50\xf6\xb5\xc8\xfa\xa6\x8d\xdb\xee\x3f\x5f\x85\xb2\xf0\x22\xd2\xc2\x4e\xb4\x85\x97\x10\x17\xb6\xa2\x2e\x6c\x43\x5e\xd8\x44\x5f\xd8\x8d\xc0\x00\xc6\x25\x94\x37\x2e\xfb\x7f\xbf\x91\x0e\xd7\xa8\xfc\x15\x22\xb8\x15\x57\x57\x0e\x84\x0d\x95\x6f\x5e\x21\x80\x4f\x77\xcb\x8b\x81\x0d\x63\xf0\x90\x4b\x00\x30\xd7\xdc\x5a\x94\x25\x2d\x9e\x30\x21
\x16\x4d\x48\x28\x7b\x4c\x0a\xcb\x1f\x31\xc0\x1d\x9a\x61\xac\xd2\x58\x67\x12\x28\x4b\x75\xc8\x62\x79\x86\xfe\xf2\xe4\x91\x89\xd7\x80\x43\x79\x34\xb6\x30\xc9\xed\x0c\x18\xcc\xf8\x74\x86\x1a\xa6\x9a\x49\x0a\xf6\xb9\x5d\xc0\x8c\x3d\xba\x3b\x80\x8c\x09\x81\xba\xae\x90\x77\xc1\xcd\xea\x12\xef\xed\x81\xd9\x65\xfd\x02\x10\x5e\x05\x2e\xe5\x9c\x17\xc3\xb2\x9f\x66\xe8\x8a\x5d\x6b\x01\x98\xc6\x44\xe9\xd4\x94\x17\x1f\x96\x49\x54\x85\x29\xf1\xa1\x49\x49\x09\x21\x78\x1d\xb0\x99\x1f\xd0\x04\xb4\x49\x04\x43\x95\x21\x24\x2a\x1b\x73\x59\x16\xb9\xd4\x04\x62\x4f\xff\xfe\x81\xcb\x34\x76\xf0\x1f\xbb\x39\xf7\x24\x40\x0c\x19\x9f\xce\xc2\x2c\x9d\x52\xb8\x31\x82\x29\xf2\x5c\x69\x87\x78\x1b\x68\x75\xd9\x1f\xdd\x0e\x4e\xef\x3f\x0f\xae\xce\x42\xb0\x3a\xef\xdd\x9d\xf7\x09\x6a\x2e\x46\xbd\x10\xab\x4e\xef\x2e\xef\x2e\x7a\xa3\xc1\x8f\xfd\xff\x91\xb8\x6e\x07\x07\xb5\x50\xeb\xee\xbc\x5f\x8b\xb6\x2e\x6a\xe1\xd8\x8a\xad\x95\xc9\xf8\x3a\x1e\xa6\xb7\x65\xd1\x8e\x72\xa0\xb7\x5f\x8c\x5d\x6e\x25\xb3\x36\x9e\xbc\xec\xb5\x14\x9b\x37\x2e\x81\xa9\xdc\x22\x4b\x5b\x4a\x8a\x45\x04\x83\x09\x50\x3e\x8c\xd2\xba\x7b\x2e\x09\xcc\x9f\x4a\x83\x9a\x93\x8d\xf8\xd3\xcf\x0d\xf0\x14\xa5\xe5\x13\x5e\x3b\xce\x39\xd3\x96\x13\x10\xb8\x92\x70\x70\xc1\xe5\xa2\x82\x4d\x96\x57\x76\xdd\xf4\x19\x1d\x0f\x8f\x73\xfd\x62\xb2\x7e\x39\xe7\xa2\x3f\x62\xbe\x4c\x03\x83\xc1\x4a\x6e\x0f\x01\x2b\x26\x96\x45\x53\x47\xca\xb8\x92\x0d\xa6\x30\x43\x8d\xef\x0e\x10\xd7\xae\xb1\xe8\xe7\x6b\xee\xb1\xb6\xc0\x7c\x0d\x03\x6a\x23\xca\x92\xc0\xdb\xf1\xe6\x26\xa8\x25\x6c\x58\x02\xf9\x88\x92\xf6\xf2\xba\x69\xad\xae\xfc\x2e\x7d\x68\x9c\xa0\x46\x99\x84\xf6\xdd\x5a\xd2\x3d\x86\x53\xa1\x8a\xb4\xfa\xcc\x98\x64\x53\xd4\xed\x4d\x4e\xc1\x5f\x79\x1f\xbb\xfb\xf9\xba\xd5\x61\xad\x80\x04\x60\x50\x4c\x2e\xb8\x7c\x78\xbb\x92\x86\xe5\xcc\x37\x1f\xa4\xd1\x9a\xba\x7c\x15\xa1\x9e\x00\x2f\x63\xb8\xb7\x6a\xd3\xd6\xae\x6a\x5e\x27\x49\xed\x66\x66\xf3\x42\x65\xed\x20\x35\x81\xcb\xca\xe1\x72\x6b\xe0\xec\x6a\xe8\x65\xc8\x35\x4e\xf8\x17\x9f\xdd\xbb\x13\x57\x3b\xa1\x04\xe6\x77\xb7\x17\x2d
\x94\x89\x4a\x09\xcb\x7b\x42\xf8\xfa\x81\x4b\xf8\x31\x5d\x5f\xc5\x78\x0f\x4c\x4a\x59\x2e\x10\x27\x85\xb1\x2a\xab\xdd\xa4\x87\x57\xe5\x4a\x43\x8c\x5f\x2c\x6a\x4a\x00\x6b\x03\xab\x38\xa0\x5c\xc0\xcc\x54\x21\x52\x17\x3e\x33\x90\xcc\x16\x9a\x09\x98\x71\xd4\x4c\x27\x33\x0a\x53\x02\xc2\x53\xad\x8a\x9c\xcb\x69\x70\xc9\x7f\x0c\x8d\xad\x3c\xb5\xb9\x7c\x54\x3c\xc1\x76\xce\x78\xda\x66\x99\xab\xc7\x40\x63\x07\x63\xed\x5c\xab\x8c\x9c\x70\x61\xda\x45\xde\x08\x96\x6d\xbc\xb3\x80\xd0\x78\x87\x6b\xf3\x46\x31\x51\x7a\xce\x74\xda\x32\x82\x99\x59\x8b\x09\xa1\xe6\xb5\x22\x32\x40\x21\xf9\x3b\x40\xe5\x4e\xf2\x9d\x88\xe2\xac\x8c\xc8\x1a\xf7\x0a\xc8\xb9\x8f\xb5\x13\xe1\xbc\xbd\xaf\x58\x55\x91\xc0\xc0\x55\xb0\x08\xd1\x43\x3f\xb0\x7a\x5f\xc4\xfd\xa1\x0a\x62\x0c\x6e\x20\x76\x17\x66\x71\x13\xe2\xb3\xeb\xbb\x93\x8b\x7e\xec\x6e\x4c\xe3\xb3\xc1\x70\x74\x3b\x38\xb9\x1b\x0d\xae\xaf\xe2\x30\xc0\x23\xde\x62\x62\x2e\xf6\x77\x53\xe5\xbd\xb1\xc6\xd2\x07\xb2\xf5\x32\xa7\x0f\x5e\x03\xc6\x4d\x04\x67\x7c\xe2\xe0\x2d\x04\x2a\xb3\x30\x84\xf5\x3e\xf2\x01\x93\x30\xe1\x4d\xde\xcf\x2a\xf3\xe5\x4c\x69\x04\x64\x86\x8b\xc5\x5a\x41\x71\xdf\xa8\x2a\xe4\x0a\x0f\xc0\x04\xe2\x4e\xd4\x39\x7a\x38\x59\xc4\x70\xef\x08\xdf\x87\xa5\x48\x66\x20\x3e\xea\x9c\x2c\xe2\x66\xe9\x6b\xbd\x72\x69\xe2\xc7\x4f\x47\x1f\x69\x62\x40\x71\x27\x91\x8f\xd1\xa7\xcb\x93\x45\x7c\x10\xc1\x0f\x6a\x8e\x8f\xa8\x9b\x4b\x95\x7b\x65\x91\xb2\x1f\xdc\x42\x14\x23\x84\x0e\xad\x92\xb2\x06\x7d\xdc\x00\x13\x73\xb6\x70\x66\x60\x67\xaa\x30\x4c\xa6\x2e\x78\x1c\x2f\x2c\x45\x16\x52\x41\xc6\xac\x45\x0d\x33\x35\x0f\xaf\x4d\x6c\xa9\xc7\x75\x46\x5d\xa8\xb2\x50\x05\xcc\x99\xb4\xc0\xc0\x1f\xd7\x25\xd2\xa8\x32\xd6\x75\x2c\xe0\x17\x56\x73\x24\xb2\xc8\xc6\xa8\x69\xf5\xd3\x9b\xbb\x96\xc1\x44\x11\x33\xd5\xc5\x08\x83\xdf\xd5\xb8\xb9\xac\xa4\xf9\xb7\x05\xc0\x64\x69\x61\x6b\x01\xdd\xd6\x72\xde\x7c\xa6\x4c\xa0\x2a\xf3\x74\x7a\x73\xf7\x1c\xc3\x3e\x01\xcc\x1f\x05\x7f\x64\x02\xa5\x15\x0b\x88\xbb\x55\x57\x55\x82\x8b\x4d\x7c\x10\xda\xe8\xc0\xab\xf1\x77\x35\x26\xfe\x0c\x74\x8f\x9a\x9d\xce
\xa7\x75\xbe\xcb\x48\x6d\xa5\x79\x6e\x96\xb9\x19\x6d\x67\xf7\xa8\xd3\xf9\x54\xb3\xfc\x9e\x70\x88\x45\x59\x1a\x25\x6d\xfc\x55\xba\x74\x69\x98\x7b\x4f\xe8\x4c\xb7\x4a\xab\x02\xc2\x73\xb6\xd8\xd4\x5c\x75\x22\xd7\x35\xb7\x53\x5b\x0f\xa5\x4e\xc2\xd8\x9e\x2c\xda\x89\x49\x92\xad\x1d\x26\x12\x2f\x22\xf9\x60\x7f\x19\x9b\x7a\x89\xdb\xdd\x4e\xa7\x13\x1f\xf8\xb2\x09\x2d\xf1\x99\x97\x94\x37\x08\x7b\x9a\x71\xb7\x1b\xfd\xdb\xd1\xc7\xad\x94\x8e\xbe\xa1\xe3\xe0\x52\xe8\x2a\x67\x29\x51\x8d\xf2\x5a\x16\x1e\xfe\x62\x5c\xd6\xa3\x7f\xa1\x09\x77\xd2\x85\xc9\x70\xaa\x52\xff\xaa\xe0\xce\xcd\x53\x13\xb8\x44\x66\x0a\x8d\xbf\xee\x13\xe4\x9b\xe3\x76\xdb\x51\x54\x93\xcc\xb7\xbb\x37\x98\x45\x52\x64\xd1\xcc\x66\xe2\x20\x5c\xc3\x32\x99\x32\x9d\x1e\xc3\xe1\xe1\x09\x33\x3c\x29\xb9\xd9\xbf\xbb\x1a\x8c\x0e\x0e\x0f\xe1\x10\xe2\x31\x69\x74\xcc\x2d\xfd\x26\xd0\xa0\x83\x46\xbf\x4d\x0c\xde\x72\x02\x8a\x87\x10\x67\x5c\x52\xb2\x26\x0b\x3f\x6e\x16\xc3\x4c\x15\x9a\x7e\xa6\x31\xa4\x6c\x41\xbf\xba\x31\xa4\x3c\x43\x69\xb8\xf2\x6f\x40\x0f\x0f\x6f\x5c\x7c\x80\x06\xf6\x6f\x6e\xfb\xdf\x0f\xfe\xeb\xe0\xf0\xb0\x4e\xf9\x21\x86\x07\x2e\x14\xec\x77\x3b\xbf\x7d\x3c\xa0\x96\x4b\x32\x80\x29\x73\x2d\xff\x70\x2d\xe7\x31\x4c\x79\xd9\xf2\x1f\xae\x65\x14\x83\x45\xed\x5b\xba\x47\x07\x75\xa2\x37\x31\xe4\x68\xcb\xee\x4f\x6e\x46\x3f\xa6\xc3\xee\x5b\xfe\xdd\xb5\xfc\x33\x86\x3f\xd1\x96\xa3\x8e\xba\xae\xed\xe7\x18\x16\x6a\xd9\xf6\xcd\x06\xe1\x8c\xb4\x20\x04\x77\xfd\x2d\xcf\x6f\x41\x6d\x89\xf6\x22\xb4\x3c\xc7\x32\x06\xc9\x64\xd9\xe4\x59\xce\x63\xc8\x79\x12\x26\xd0\xae\xbb\x7b\xe4\xfa\x27\x31\x4c\x30\xb3\xe5\x9c\x92\x6b\x16\x03\xb3\xcb\x36\xcf\xf7\x9f\xc4\x77\x5e\x35\x1e\x75\x37\x98\x5c\x90\x10\xc9\x72\xc0\x37\x6e\xd6\x67\x4e\x8a\x1e\x73\xd8\x3f\xfa\xad\xdb\xf1\x8a\xe6\xa4\x69\xdf\x74\xe4\x9b\xce\x39\xa9\x7a\xcc\x43\x2e\x8f\x7e\xfb\xe8\xbb\x47\x9c\xf4\xee\x67\x7c\xe3\x9b\x6e\x38\xe9\xda\x37\x7d\xa2\xa6\xc3\x73\xcd\xb2\x8c\xe9\x43\x5f\xd9\x9c\xfa\x2f\x60\xa2\xf6\x1a\xc1\x87\x94\xde\xaf\xba\x27\x0c\x52\x62\x62\x95\x36\xc7\x44\xb6\x4d\xc6\xf4
\xc8\xdd\x6b\x68\xa5\x41\x93\xbf\x85\x7d\x66\x08\x6b\xb9\x9c\xf0\x2f\xa0\x72\xd4\xcc\x2a\x5d\x03\xc5\xb5\x50\xcd\x34\x9d\x2f\x6a\x3f\x61\xc6\xb8\xf0\x50\x1a\x5f\xf2\x93\x45\xbb\xdb\xc9\x4c\x0c\xfb\x4c\x90\xbb\x99\xce\x1c\x2a\x95\xb1\x21\x13\x99\xaa\xbd\xad\x91\xe4\xe8\x7c\x70\x1a\xb7\x4d\x5c\xe2\x9c\xc7\x28\x8f\x4e\xdf\x12\x83\xab\xf8\xb2\x74\x67\x63\x77\x1d\x92\xbb\x37\xca\x2c\x24\xf9\x47\x81\x7a\xe1\xf3\xe7\x89\x56\x99\x03\x2d\x57\xc0\x15\x0b\x57\x04\x2e\xb2\x42\x38\x08\x26\x9e\x53\x14\xb6\x74\xd9\x07\x11\x29\x27\x8a\x21\x2b\x84\xe5\x2e\xec\xd9\x78\x3a\xa0\xb4\x5b\x56\x19\x57\x4f\xdb\xa5\xb4\xba\xa2\xce\x4f\x16\x51\xea\x55\xf4\xf0\x34\x67\xd6\x3e\x47\xb3\x2d\x31\x51\xb5\x9d\xee\xc1\x90\x43\x16\xe7\xbf\x4d\x59\x51\x37\xc7\xd0\x5f\x96\xcd\xe1\x3b\x38\xad\xde\x17\xc2\x13\x34\xa2\xc6\xea\x3b\x20\xfc\x4c\xbd\xed\xb5\x5e\x78\x86\x6f\xd7\xbe\xbe\x83\x7d\xf8\x05\x3c\x84\xc0\xaf\x40\x40\x06\xff\x82\xc6\x57\x0d\x38\x80\x5f\xa0\x27\xa5\xb2\x9b\x6a\xf8\x15\xfe\xb5\xd6\x45\xe3\xbb\x0d\xf8\x76\xbd\xe9\x3b\x68\x3c\x35\xe0\xaa\x77\xd9\x87\xc6\x33\xf5\x5d\x29\x8b\xde\xfc\x56\xa3\x42\x97\xc0\x8d\x77\xc6\x0c\xfc\x2b\x2a\x4b\xfe\x91\xdb\x4a\x7a\xf2\x67\xc4\x5d\x1c\x55\xce\x99\xad\x96\xe3\x65\x18\xc1\x84\x92\xd8\xac\x87\x46\xb2\xb4\x01\xaf\xd0\x55\x2c\x40\x4e\x36\xee\xc6\x1b\xfb\xf5\xa4\xfd\x2b\xb0\xe7\xb6\x81\xef\xbe\x83\x6e\xdb\xd4\xdc\x62\x7c\xb2\x78\xb2\x9a\x49\x93\x91\xbf\x4f\xcb\x71\x27\x8b\xb6\x89\x9d\x11\x91\xdc\xce\xad\x32\x30\x44\xaa\x7c\x86\x2b\x95\x6c\x8d\x05\x93\x0f\x90\x6b\x2e\x5d\xd6\x10\x86\x07\xc3\xd3\xc1\x60\xfd\x95\x02\xe5\x78\xe5\x33\x4c\x32\xdd\xf8\xc9\x1b\xd1\xb3\x5f\xa5\x1b\xaf\x62\x66\x53\x5a\x0c\xd3\x61\xfc\xfe\x4b\xe8\x36\x68\xc8\xca\xf1\xa1\x8c\xe6\xfc\x81\xe7\x98\x72\xe6\xdc\x1e\x7d\xb5\xcf\xd6\x67\xdc\xff\x51\x30\x69\xb9\x5d\x1c\xd4\xc3\xe2\xee\xea\x59\x1a\x97\x10\x77\x9d\xec\x3e\x8b\xb0\x8b\xdc\xdf\x4b\xfb\x4d\x99\xd3\x0e\xc8\xd5\xa5\x10\x8c\x57\xae\x33\x0c\x0a\xc8\xa9\xe7\xb9\x56\xb9\xe6\xcc\xd6\x1f\x80\x35\x24\xce\x5d\x82\x6b\x20\x47\x4d\x9e\xb1\x51\xd5
\x20\x97\x6a\x70\xa1\x74\xb8\x55\xdd\x76\x79\xf4\x9e\x24\xce\x5b\x6e\xfe\x33\x35\xed\xfb\x68\x3d\xc8\x88\xe2\x4f\x31\xcc\x1d\xcc\x64\xc8\x24\x34\x3e\xc1\x72\xcd\x83\x17\x23\xb9\x46\x10\x5b\xe7\x6c\x8a\xf0\xc8\x71\xbe\xc6\xa9\x27\x1b\xf2\x5a\xbb\x71\x83\x98\x62\xa7\x8a\xdd\x87\x25\xe3\x0f\x4f\x44\xf0\xde\x11\xdc\xc9\x3a\xe5\x1b\x9f\xa2\x8f\xe1\x91\x0a\x64\xf9\xd8\xe9\x6c\xe3\xcc\xc3\xde\x57\x81\x2d\x05\x46\x13\x50\x5c\x2e\xd6\xa5\x40\xaf\x4c\x79\xfc\x39\xc4\xea\x4e\x1b\xa6\xfc\x91\xcc\x95\xd1\x1a\x09\x25\x74\x53\x74\x09\xd6\x2a\x31\xd9\xa4\xe9\x43\xba\x95\xe5\x70\x7f\x68\x35\x93\xbe\x9e\xdc\x89\xa2\xd5\x82\xb5\x5d\xfb\x18\x3b\x19\x43\x4e\x1b\x1f\xab\xe5\x4b\x11\x29\xea\x88\xbe\x22\x17\x93\x12\xb4\xd3\x8a\x15\x9d\xf2\x8c\x51\x8b\xf3\x86\xcd\x15\x23\x35\xbf\xba\xc6\x14\x71\x54\x5e\x9e\xce\xb9\x10\x2e\xb7\x2c\x3d\x87\x4f\x68\xba\x9d\x8e\x63\x37\x48\xf3\xd6\x95\x12\x46\x02\x2e\x03\x0d\x05\xeb\x44\x9d\x4a\xb6\x50\x9e\x37\x5f\x47\x6c\x7d\x78\xf4\xba\x7b\x86\x6d\x8f\x8d\xfe\xa2\x56\xb2\xeb\x81\xd1\x4b\xa5\x12\x17\x7c\x7b\xd8\x2f\x9d\xaa\xc5\x29\x25\xc1\x0c\x26\x42\x31\xcb\xe5\xb4\xe5\x8a\x5a\x65\x1a\x19\x22\xf2\xdf\xab\x92\x94\x95\x91\x5a\x3e\xb1\xbb\x4a\xf2\x17\xaf\x97\xde\x5d\xfb\xd8\x78\xab\xb4\xf1\x4a\xa9\x7a\x9f\xf4\xdf\x01\x00\x00\xff\xff\xde\x51\x33\xf8\x28\x37\x00\x00"), }, "/monitoring/monitored_project.yaml": &vfsgen۰CompressedFileInfo{ name: "monitored_project.yaml", @@ -738,9 +738,9 @@ var Assets = func() http.FileSystem { "/osconfig/beta/guest_policy.yaml": &vfsgen۰CompressedFileInfo{ name: "guest_policy.yaml", modTime: time.Time{}, - uncompressedSize: 46215, + uncompressedSize: 46212, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x3d\x5b\x73\xdb\xb6\xd2\xef\xf9\x15\x3b\xc9\x43\x92\x19\x59\xea\xc9\xe9\xe5\x1b\x7f\x73\x1e\x14\x5b\x4d\x35\xc7\xb6\x3c\x96\x9d\x4e\xda\xe9\xc4\x10\xb9\x92\xf0\x85\x04\x78\x00\xd0\xb2\x4e\xa7\xff\xfd\x1b\x5c\x78\x07\x29\xca\x71\xdb\xb8\x15\x1f\xda\x58\x04\x16\xbb\x8b\xbd\x61\xb1\x00\x5f\xc0\x09\x4f\xb6\x82\xae\xd6\x0a\xde\x7c\xf5\xe6\x0d\xbc\xe3\x7c\x15\x21\x9c\x9d\x9d\x0c\x61\x1c\x45\x70\xa5\x5f\x49\xb8\x42\x89\xe2\x0e\xc3\xe1\xb3\x17\xf0\xec\x05\x9c\xd1\x00\x99\xc4\x10\x52\x16\xa2\x00\xb5\x46\x18\x27\x24\x58\x63\xf6\x66\x00\xef\x51\x48\xca\x19\xbc\x19\x7e\x05\xaf\x74\x83\xe7\xee\xd5\xf3\xd7\xff\xfb\xec\x05\x6c\x79\x0a\x31\xd9\x02\xe3\x0a\x52\x89\xa0\xd6\x54\xc2\x92\x46\x08\x78\x1f\x60\xa2\x80\x32\x08\x78\x9c\x44\x94\xb0\x00\x61\x43\xd5\xda\x0c\xe3\x80\x68\x3c\x3e\x38\x10\x7c\xa1\x08\x65\x40\x20\xe0\xc9\x16\xf8\xb2\xdc\x0e\x88\xb2\x18\xeb\x67\xad\x54\x72\x3c\x1a\x6d\x36\x9b\x21\x31\xd8\x0e\xb9\x58\x8d\x22\xdb\x52\x8e\xce\xa6\x27\x93\x8b\xf9\xe4\xe8\xcd\xf0\x2b\xdb\xe7\x86\x45\x28\x25\x08\xfc\x4f\x4a\x05\x86\xb0\xd8\x02\x49\x92\x88\x06\x64\x11\x21\x44\x64\x03\x5c\x00\x59\x09\xc4\x10\x14\xd7\x18\x6f\x04\x55\x94\xad\x06\x20\xf9\x52\x6d\x88\xc0\x67\x2f\x20\xa4\x52\x09\xba\x48\x55\x85\x5d\x19\x7e\x54\x56\x1a\x70\x06\x84\xc1\xf3\xf1\x1c\xa6\xf3\xe7\xf0\x76\x3c\x9f\xce\x07\xcf\x5e\xc0\x8f\xd3\xeb\x1f\x66\x37\xd7\xf0\xe3\xf8\xea\x6a\x7c\x71\x3d\x9d\xcc\x61\x76\x05\x27\xb3\x8b\xd3\xe9\xf5\x74\x76\x31\x87\xd9\xf7\x30\xbe\xf8\x00\xff\x9e\x5e\x9c\x0e\x00\xa9\x5a\xa3\x00\xbc\x4f\x84\xc6\x9f\x0b\xa0\x9a\x91\x76\xf6\xe6\x88\x15\x04\x96\xdc\x22\x24\x13\x0c\xe8\x92\x06\x10\x11\xb6\x4a\xc9\x0a\x61\xc5\xef\x50\x30\xca\x56\x90\xa0\x88\xa9\xd4\xd3\x29\x81\xb0\xf0\xd9\x0b\x88\x68\x4c\x15\x51\xe6\x97\x06\x51\xc3\x67\x94\x2d\xf9\xf1\x33\x00\x45\x55\x84\xc7\x30\x9b\x9f\x70\xb6\xa4\xab\xd1\xbb\x14\xa5\xba\xe4\x11\x0d\xb6\xcf\x00\x42\x94\x81\xa0\x89\x86\x72\x0c\xd7\x6b\xcc\xdb\x41\xa9\x1d\x08\x94\x3c\x15\x01\x3e\x03\xb8\x3f\x0a\x83\xe8\x48
\x2a\x91\x06\xea\x88\x91\x18\x8f\xa1\x0a\xd1\x36\x58\x13\x79\x44\x49\x7c\x0c\x4b\x12\x49\x7c\x96\x10\xb5\x96\x1a\x9b\x15\x2a\xfd\x3f\xcf\xc0\xcb\x94\x05\xfa\x2f\x2d\x8a\x66\x2e\x57\xa8\x25\x70\xc9\x45\x6c\x88\x04\xb2\xe0\xa9\x02\x52\x1b\x0e\x20\x21\x82\xc4\xa8\x50\x48\x0b\xf9\x08\x7c\x68\xe9\x27\x93\xa2\x63\x50\x22\x45\xf7\x63\x05\x91\x31\x2c\xd3\x28\x02\xca\xa4\x32\x32\xcf\x97\x8d\x01\xb5\xf8\x6d\xfb\xd2\x60\x1a\x7f\x89\x54\x84\x18\xa1\xc2\xbe\x64\xd8\xd6\x5f\x0a\xd6\xe3\x28\xda\x17\xf1\x28\xea\x8d\x7a\x22\xf8\xff\x61\xa0\xba\xd0\x96\xc1\x1a\x63\x72\xec\xfe\x02\x50\xdb\x04\x8f\x41\x5b\x10\xb6\x7a\x06\x10\x51\xd9\x5b\xc8\x75\x5b\x8f\x7c\xc4\x84\x6d\xff\x30\x8c\xb5\x8d\xe7\x0c\x99\x32\x70\x6d\x53\x37\x44\x09\x85\xac\xb3\xb3\x27\xcd\x19\xb6\x8a\x4f\xc3\x1c\x21\x39\xfa\xf5\x57\xf7\xcf\xdf\x7e\x1b\xad\xf2\x0e\x14\xf5\x1b\x8d\xfb\x6f\xbf\x55\xfa\x26\x44\x20\x53\x47\x01\x67\xda\x9b\xa0\xa8\x93\x56\x98\x96\x40\x20\x51\x58\x21\xd1\x6b\x77\xca\xaf\x04\x92\xf0\x48\xd1\x18\x79\xaa\x8e\xe1\xab\xca\x3b\xa3\xa7\x6d\x2f\xad\x10\x35\xdf\x5a\x26\xf2\x85\x8f\xf9\xee\x6f\x3b\x45\xf9\x1f\x55\x72\x12\xc1\x13\x14\x8a\xa2\x2c\x26\x86\x48\x49\x57\x2c\x46\xa6\x8a\xdf\xbc\x23\x15\xe8\xad\xb8\xb3\xc3\xe3\xbc\xaf\xaf\x8d\x85\x51\x9a\x36\x6f\xf3\x8a\xc4\xce\xad\x3f\x42\x69\x3c\xcb\xfb\x73\xfd\x7f\xa2\x80\x08\x74\x78\x6a\x19\xd6\x41\x43\x62\x00\x0e\xe1\x5a\xff\x41\xa2\x88\x6f\x64\x09\x26\x98\x48\x43\x71\x50\x44\x68\xa3\x2e\x51\x19\x8f\xb8\x12\x3c\x4d\xa4\xd6\x73\x0d\x7a\xb1\x85\x90\x2e\x97\xa8\x45\xa0\x24\xea\x20\xd3\x60\x0d\x44\x42\x44\x16\x18\xc9\x41\x05\xae\xa6\x5b\x0e\x60\x36\x1f\x68\x78\xff\xe5\x0c\xe5\x10\x26\x71\xa2\xb6\x25\x46\x4a\xd8\xd0\x28\xca\x06\x1f\x9f\x9d\x99\xe1\x8c\xc7\x64\x48\xd4\xba\x02\xb1\x42\x8e\xf6\x84\x11\x0d\x14\x9c\x13\x46\x56\xa8\x81\x41\x26\xc1\x96\x15\x78\xaf\xf5\x77\x4d\x57\xda\xd9\xa7\x89\x8e\x3f\xd4\x1a\x2b\x10\x33\xdf\x09\x6b\x8a\x82\x88\x60\xbd\x85\x57\x41\xc4\xa5\xf6\xd7\xdc\x30\x76\x26\x56\xaf\x2d\x8e\xda\xdb\x0b\x1a\xea\xe8\x80\x4b\x84\x88\x6f\x50\x40\xc8\x37
\xac\x02\x91\x9a\xf0\x4a\x98\xc0\x45\x47\x5c\x16\xc9\x21\x8c\x95\x8d\x22\x48\x8c\x10\xe1\x1d\x46\x0e\x1d\x2f\x0a\x14\x87\x15\xa0\x3a\xba\x33\x11\x9c\x93\xd1\xd7\x03\x0b\x0c\xc5\x1d\x35\xc1\x5f\x14\x41\x22\xf0\x4e\xf3\x40\xbf\x30\x1a\xa8\x0d\x16\x5f\x42\x9c\x46\x8a\x26\x11\x5a\xc6\x51\x94\x35\x96\x12\x95\x63\x69\xa3\x48\x24\xc1\x1a\xb8\x26\x62\x08\xd3\x8c\x1a\x2d\x56\x0d\x48\xb6\xb7\x0d\x8c\xb6\x35\xb0\x8e\xd4\xc0\xc6\x2b\xaf\x70\x35\x84\x84\x04\x9f\xc8\x0a\x8b\xe0\x0f\x04\x06\x34\xc1\x01\x08\x4c\xb8\xa4\x8a\x8b\xed\x00\x50\x05\x43\x4b\x5f\x05\x62\x85\x56\x64\x32\x15\x68\x87\x67\x1c\xde\x9f\x43\xc0\xd3\x28\x84\x84\x2b\x64\x8a\x92\x28\xd2\x61\x51\x80\xf4\x0e\x8d\xcb\xd2\xf1\x90\x0e\xc5\x2a\x10\x97\x82\xc7\xb0\xe0\x6a\x9d\xd3\x33\x84\x6b\x6e\x79\xb7\x8b\xd8\x42\x1b\x2a\x20\xef\x6c\x4c\x2f\xad\x7b\x74\xf4\x6a\xe9\x2f\x94\xc7\x32\x44\x9a\xb0\xb2\xf8\x75\x96\xa0\x20\x3a\x32\x86\xf9\x56\x2a\x8c\x6b\xaa\x64\xa6\xc4\x0a\x3e\xc4\xa9\x54\xb0\xd0\x08\xaa\xd4\x10\x8a\xf7\x41\x94\x4a\x4b\xaa\x66\x1b\x15\x4e\x9d\x34\x38\x12\x04\x5c\x84\xfa\x5f\x8a\x57\x40\x3a\x8d\xcd\xd4\xd3\xcc\x37\x04\x82\x2a\x14\x94\x0c\xe1\xb4\x81\xb0\x9e\x2f\x1a\x6a\xf6\x2e\x29\x86\x06\xff\x4c\x6e\xaa\x7c\xa5\xac\x44\xd8\x86\x6c\xe5\x10\x2e\x2d\x27\x1a\x40\x16\x5b\x87\xb0\xb6\x15\x3a\x72\x36\x82\xe3\xf8\x56\x01\x1a\x1b\x25\x17\xaf\xe4\x6b\xdd\x64\xeb\x28\xcc\x21\x17\x12\x44\x3b\x46\x49\x19\xfd\x4f\x5a\x85\x4b\x43\xd8\x58\x01\xcf\x17\x2e\x43\x98\xf3\x38\xc7\x22\x1b\x59\x42\xc8\xd9\x4b\x05\x6b\x72\xa7\x63\x2d\x0b\xaa\x18\x45\x54\x45\x8b\x8b\x1a\x46\x2c\x1b\x47\x4b\xd1\x4b\x6b\xb3\x03\xa2\xd7\x81\x8c\x3b\x60\x4c\xaf\x45\xa8\x84\x3b\x12\xd1\x90\x28\x0c\x2b\x20\x2d\x11\x99\x1a\x0c\xe1\x82\x2b\xa7\x00\x74\x09\xb3\x39\x4c\x99\xd6\x7e\x2e\xb6\x6e\xc5\xa4\x29\x09\x07\x40\xb4\x72\x18\x9d\xd1\x0b\xc9\x45\x95\xf8\xdc\x4f\x90\x4c\xb8\x0c\x40\xcb\x5b\x63\xf4\x67\x73\x58\x60\x40\xec\x02\xb4\x66\x70\x24\xda\x55\x69\x05\xe4\xfb\xf3\x97\x52\xf7\x22\xda\x88\x7f\x62\x7c\xc3\xca\x96\xcc\xe7\x54\xf5\x63\x9c\xcd\x99\x91\xc8\xea\x8b
\xcc\xbb\x12\x21\xc8\xb6\xf6\xa6\xe6\x60\xdf\x15\x30\x6a\x0d\xab\x81\x9e\x23\x2e\x8b\x64\x25\xc4\x44\x05\x6b\xa3\x2b\x0a\x22\x24\x52\x01\x67\xe8\x96\xc9\xda\xce\x6b\x90\x35\x88\x60\xdc\x64\xc5\xa5\xea\x65\x69\xe1\xd6\x4a\x0e\x35\xa4\x52\x3b\x4c\x85\xce\xa5\x0e\x8c\x74\xe0\x3d\x89\x93\x08\x1b\x70\x9f\x23\xbb\xfb\x57\x22\x78\xa8\x95\x52\xff\x5b\x2a\xb2\xa2\x6c\xf5\x7c\xe8\xa5\x5e\x22\x0b\x8f\x50\xfb\xd4\x4a\xb8\x55\x6d\xa4\xc3\x58\x17\x61\xe8\x7f\xd6\xda\x50\x6d\x6e\xea\x5c\x6f\x8d\x6a\x6a\xac\xef\x88\x5b\xda\xe7\xa3\x5d\x0e\xec\x13\x79\x05\xa1\x07\x5a\xfa\x21\x61\x48\xf5\x4c\x93\xe8\xb2\x73\x10\x68\x2e\x0d\x9a\x4f\x4d\xc4\x5a\xa8\x81\xba\x8c\xb9\x1c\xd1\x09\x8f\x93\x54\x21\x4c\xd8\x8a\x32\x2c\xd6\x4e\x96\x3e\xab\x6b\xce\x9a\xb7\x20\x98\x08\x94\x5a\x9a\xb4\xc4\x10\x56\x40\x50\x5c\x7b\x00\xca\x82\x28\x0d\x31\xb4\x76\x5f\x4b\x62\x21\x7f\x46\xd6\x86\x35\xbb\x6c\x7b\x5f\x90\x18\x2f\x05\x2e\xe9\x7d\x93\x33\xbd\xb5\x6d\xea\x01\xd6\x47\xed\xde\x9f\x97\x34\x6f\x63\x02\x29\x63\xfb\xa5\x22\xc2\x04\x83\x6a\x5d\x51\x3f\x8f\xe8\xd8\xc1\x86\x70\x46\x3f\x61\xee\xc6\x0c\xfd\x9a\x05\xcc\x7a\xb2\x0d\xd9\x9a\x34\x85\x66\x43\x7d\x50\x64\x4d\x69\xcf\x1d\xa6\x73\x77\x15\x2d\x75\x83\xfe\xeb\xb9\x56\xcc\xa3\xe7\x43\x98\xb1\x68\x0b\x32\x4d\x12\x2e\x94\xf5\x85\x3e\x11\xd7\x32\x7a\x64\x63\xbd\x3c\xc2\xf8\x53\xb4\xb8\x45\xc2\x6b\x5a\xec\x69\x95\x73\xed\xb3\xe5\xa4\x97\x70\xe8\x75\xb5\x4b\x4f\x16\xf3\xe5\xf2\x6e\x18\x0e\x0b\x60\xc6\xc3\xe7\x2f\x1a\x84\xe5\x0e\xff\xe6\x6a\x9a\x85\xd9\x7a\x11\x0f\xb7\x66\x15\x32\xfa\xf9\xa7\xd9\xc5\xe4\x97\x51\x3e\xc6\xe8\xe7\xe9\xc5\xfc\x7a\x7c\x71\x32\xf9\x78\x31\x3e\x9f\xfc\x72\x5b\x9f\x26\xc8\x87\x2e\x49\x0a\xd5\x2e\x2e\xe0\x71\x6c\x72\x92\xa1\xfe\xbb\x10\x09\xc5\x61\x49\x02\x1a\x51\x45\x8c\x9f\x6e\x0a\x72\x5c\x2c\x5b\xf8\x12\x82\x35\x61\x3a\x40\x72\x6e\xbe\xc8\xb3\x88\xf2\xda\x2c\x4b\x41\x96\x05\xba\x01\xd8\x44\x96\x5a\xea\x78\x62\xa0\x9b\xb0\x0a\xa5\xc6\xd9\x27\xb9\x2d\x92\xda\x00\xab\xc1\x18\x6b\x25\xd0\xc4\x76\x41\x79\x92\xdc\x02\xc5\x2e\xd0\x2c\xbc\x27\x26
\xea\x50\x4a\x45\x38\xfa\x3c\x83\x1c\xe5\xcb\xb5\xe3\xcc\xbc\x8f\x32\xd1\xf0\xd8\xf0\x25\xc5\x28\x3c\x06\x89\xd1\xf2\x8c\xb2\x4f\x95\x16\x5c\x5e\x6f\x93\xcf\xd0\xac\xd9\xdc\xf4\xdf\xdb\xe8\x76\x85\x3b\xb0\xe4\x3a\x98\xf1\xf1\x66\x36\x37\xa8\xc9\x61\x0d\x9a\x96\x08\x03\xd2\xe4\xf1\xb4\x64\x45\x3a\xe6\xce\x56\x12\xd6\x75\xc1\x8a\xde\x79\xac\xee\xcc\xb0\xa0\xe6\xcd\x9e\x5c\xa0\xe3\x9f\x88\x5d\x41\x0e\x97\x63\x11\xac\xa9\xc2\x40\xa5\x02\xbb\x82\x9d\xfe\xf1\xc9\x6c\x5e\x86\xb9\x3b\x4e\xf1\x3b\x65\xed\x82\x2b\x0b\x0a\x64\x66\x31\x61\x76\x36\x7c\x30\x41\x2f\x8a\xcc\x12\xb3\x2c\x41\x66\x15\x50\x42\xa7\x69\x53\x35\x13\xe6\x6b\x2e\x94\x0e\x22\x1e\x8b\x03\x39\xc0\x2f\x80\x7c\xa9\x71\x31\xf1\x4d\x35\x9c\x78\x1e\xe2\x82\x12\xf6\x1c\x3c\x71\x83\x7d\x9e\x6f\x28\x0b\xf9\x46\x36\xc2\x7e\xcb\x35\xb7\x77\xf8\x58\x3c\x73\xe0\xfe\x6c\x8e\x55\x78\xe7\x52\x29\x55\xfa\x8d\xfb\x7e\xb0\xc5\xfc\x49\xf7\xde\x6f\x6d\x48\x59\x29\x28\x91\x98\x65\x31\xcf\x90\xdc\x21\x18\x73\x54\xcf\xab\x40\x1e\x4d\x36\xc1\xe8\xde\x43\x8d\x06\x89\x7a\xc6\x11\x4d\xd0\xf5\xb8\xa2\x3d\x8e\x30\xa3\x3d\x1d\x27\x6c\xf3\x6e\xd7\xb4\x6a\x0a\x5a\x00\xda\x2d\x99\x63\x08\x89\xcb\xfd\xb7\x27\xde\x4f\x72\xb8\xa5\x36\x02\x49\xa8\x43\xa1\x06\xfd\x15\x79\x98\xa5\x2a\x49\xb5\x7f\x8c\xb6\x43\xd0\x10\x6c\x80\x53\xde\x7c\xdd\x10\xe9\x30\xaf\x38\xae\xfb\xa3\x4f\xe9\x02\x05\x43\x85\xf2\x88\xc6\x71\xaa\xb4\x46\xd4\x46\x2b\x8f\xb5\x9b\xe4\x1a\x59\xa7\x45\xe7\x36\xf4\x4b\x4d\x32\xff\x5e\x42\x5d\x4b\x31\x5b\xe9\xe5\x96\x7d\x55\xea\x0a\xb5\xa4\x8e\xd9\xcc\xb6\x41\xed\x3f\xbe\x7a\xf3\xb5\x16\x32\x41\x02\x85\xa2\xb4\x9e\x41\x45\x56\xfb\x53\x31\x51\x64\xb5\xf7\xb4\x5c\xaf\xd1\x0c\xe7\xb6\xe6\xab\x33\xe2\x72\xd6\x76\x35\x98\x08\x7e\x47\x43\x5b\x36\x90\x26\x5a\x58\xaa\x79\x55\xaa\xca\x01\x4c\x96\xe1\x42\xf1\x52\x9a\x01\xf6\x9e\x51\x46\x7a\x49\x6f\x8d\x09\x35\x6f\x55\xa1\xf5\xe5\x8d\x4d\x34\x9a\x45\xb2\x9b\xa9\x7c\xc7\xa0\x16\x71\x43\x2a\x0b\xa3\x2a\xe2\xaa\x76\xde\x16\xfb\x7e\xee\x5f\x1f\x69\xd8\xd8\xf8\x33\x7f\x7e\xb4\xc9\x40\xfd\xfe\x76\xf8\x32
\x87\xe2\x12\xa2\x57\xa5\xbc\x66\x93\xd6\xba\x19\xae\x91\x7a\xd9\x84\xd1\x46\xf9\x19\x95\xc6\xa2\x25\xf5\x0c\xef\xd6\xad\xd1\x53\xe1\xea\x2b\x8a\xed\x2e\xee\xf6\xc2\xbc\x8b\xd8\x61\x03\xad\x0e\x3b\xb8\xcb\x06\x7a\xec\x5f\x6b\x20\xd9\x1e\x44\x76\xb3\xa3\x2b\x84\x24\x89\x7a\x68\x28\xeb\xf6\x23\x93\x7d\xa2\x5d\x0f\xa2\x3e\x00\xd5\x92\x01\xa6\x07\x81\xbc\xcf\xb6\x19\xcb\xd8\x01\xf3\x6d\x04\xdf\x7a\x6b\xc5\x9b\xfe\xf5\x08\xb6\x69\xec\xf9\xf5\xbf\xdb\x24\xc1\x66\x48\x55\xdf\xf7\x2d\x77\x49\x05\xf5\xfc\x9a\x57\x1c\xf9\xa2\xa2\xee\xb8\xde\x84\xbc\x77\xa8\x97\x04\x8f\x13\x9e\x8d\x0b\x80\xdd\xcd\x7b\x4e\x5a\x09\xde\x84\x79\xd8\x08\x0d\x13\x64\x16\x69\x7c\x99\x91\x66\xaa\xd0\x64\x6e\x7c\x0a\xbd\x1c\x1a\xcb\x1c\xe2\x92\xa4\x91\x3f\x39\x0b\xb0\x40\x1d\xfa\x71\xa1\xad\xf3\xe9\xe4\xed\x10\x2e\xb9\x94\x74\x11\x21\xdc\x91\x28\x45\x79\x0c\xe3\xab\x93\x1f\xa6\xef\x27\x1f\xaf\x3f\x5c\x4e\x3e\xde\x5c\xcc\x2f\x27\x27\xd3\xef\xa7\x93\xd3\x41\x0b\xc4\xd3\xc9\xdb\x81\xfe\xcf\xc7\xf9\xd5\xc9\x4b\x6f\x1b\x64\x69\xec\x9f\x8b\xa3\xd6\xd1\x5a\x9a\x9f\x4e\xde\xb6\xbf\xd1\x18\x78\xde\x56\x2b\x38\x9a\x4f\x7b\xfc\x6a\x9f\x7a\x3c\x93\x83\xdb\x3d\x75\x57\x4e\xf2\x87\xb9\x35\x2d\x90\x29\xbc\xa7\xe8\xd0\x50\xfb\x9c\x6b\x27\xe9\x2a\x3f\xaa\x79\x03\x6d\x07\xfd\xbd\x7a\x85\x9a\xe5\xa6\xdd\x01\xa7\x7d\x5a\xc2\xce\x32\x23\x3b\x34\xab\x6f\x1e\x08\x2a\x06\xe0\x71\xd4\xf8\xb4\xcb\xa4\x40\xfb\xc4\x95\xfb\xd9\x00\x60\xe7\x8c\xad\x92\xd5\xbf\x71\xfb\x38\x68\xbf\x33\xb0\x76\x23\x7c\x73\x35\xcd\xc2\x93\x4f\xb8\xb5\x85\xaa\x3e\x01\x33\x26\x82\xac\xea\x1b\xe7\xc5\x13\x13\x6a\xc4\x4c\x02\xd1\x90\x84\xcb\x54\xdd\x8e\x50\x05\x23\x92\xa8\x91\x12\xa9\xd4\xd1\xf6\x2a\x59\x0d\xc3\x11\x97\x36\x18\xf8\x68\x60\x7e\xb4\x8b\x21\xf3\xf2\xb6\x65\x00\x27\xc6\x06\x6e\x14\x65\x18\xe7\x6b\x34\xe2\x12\x58\x26\x12\xca\x8a\x4d\x3c\xa0\x52\x41\x1f\x87\xc3\x37\x1e\x0f\xd4\x60\x6f\x21\x0f\x9a\xd1\x3d\x34\x77\xc5\xf9\xe7\xc5\x07\xef\x3c\x7e\x77\x2f\x57\xe3\x03\x50\x2b\x29\x7c\xc7\xf9\xe7\x86\x07\xc4\x13\x85\x3c\x62\x78\xc0\x7c\x89\x24\x1d\x35
\x34\x77\x84\xbb\xc3\x82\xfa\xca\xa0\x78\xf6\x95\x97\x7e\xb9\xad\x42\x60\xb4\xc2\x55\x57\x0f\x5d\xb6\x23\x15\xd1\x63\x89\x75\x93\x45\x3b\xb0\x4c\x45\xb4\x1b\xc9\xad\xcf\x97\xef\x23\xd8\x1f\x3c\xb2\xb1\x97\x60\xfb\x00\xd4\x04\xfb\x43\x1a\xff\x3e\x82\xed\x8f\x86\x1f\x20\xd8\xb4\x99\x5a\x3a\x82\x05\x91\xe8\x9b\xb7\x6e\xd1\x76\xbd\x1e\x47\x6c\xde\xb6\xa0\x00\xdd\xa2\x13\xf1\x80\x94\xf3\x1c\xa5\xc5\x62\x48\x05\x06\x6d\xe2\x1e\x52\x99\x44\x64\xfb\x78\x09\xe8\xd3\x02\xe0\x6e\x1a\x4c\xb8\x6c\x3b\xec\xa1\xa2\xd6\xbd\x3f\x4e\x30\x69\xdd\x7b\x8f\x48\xf2\xe6\x6a\x6a\xaa\xda\xde\x5d\xbe\x33\xee\xf2\x2f\x18\xf3\x51\x8f\xa2\xf4\x1a\xa2\xbe\xe5\xed\x4f\x7a\xb7\x48\xef\xd8\x84\xd1\x1b\x2e\xc2\x41\x56\x54\x66\x44\x61\x8f\xe8\xfc\xda\x65\xbb\xb4\xe8\xdc\xea\xe6\x40\xc3\xdb\x6c\xdf\x5b\x9b\x22\x57\x84\x69\x82\x32\xc2\x42\x20\x91\xb4\x99\x92\x5b\x27\x7d\x1f\xf5\x90\x6d\x01\x13\x5d\xd6\xda\xe9\xb1\x78\x4c\x95\xb2\xda\xa7\x07\x37\x59\x6b\x03\xd6\xd4\xce\x13\x8b\x8c\xa7\xde\xae\xcc\xee\xac\x74\xce\x94\x64\x40\xb0\xc6\xe0\x93\xdd\x06\x10\x95\xe8\xab\x28\x33\xac\x33\xc0\xda\xbd\xcf\x73\x07\x3f\xf9\x6d\xe7\x5e\x1e\xa1\x05\x46\xcd\x29\xd8\x56\x7f\xa4\x5f\xf0\x85\x41\x07\xa7\x70\x70\x0a\x3b\xd0\x3d\x38\x85\xd2\x38\x7f\x49\xa7\x60\x0d\x67\xab\x5f\x68\x01\x59\xf7\x02\xfb\xf8\x85\x16\x90\x4d\x6f\xd1\xdf\x2f\xb4\xaf\xef\xeb\xde\xc2\x6d\x21\x3c\x78\xbb\xa2\xf7\x1e\xc5\xce\x8d\x89\x27\xb0\x19\xd1\x77\x07\x22\x44\xa9\x85\x75\xae\x88\xf2\x98\xc9\x7e\x1b\xc1\xf9\x56\x66\x0e\x69\x7f\x47\x2c\xcb\xdd\xbd\x19\xed\x5a\x36\xdb\xee\x75\xea\x2e\x1f\x65\xbe\x8b\x6e\xd2\x47\x20\xd7\xe6\x18\x49\x96\x83\xca\xf5\xcd\x23\x6a\x6e\xca\x2b\x39\x6f\xa3\x6f\xbc\x38\x9d\x52\x1c\x22\xa0\xae\xa8\x20\x8a\xb4\x6e\x64\x39\x6f\x0f\xd8\x2c\x0b\x7e\x3a\x99\x4f\xaf\x26\xa7\x1f\xe7\xd7\xe3\xeb\x6a\x1a\x1c\x4c\xad\xe2\xd9\x99\xfe\xe7\xd5\xe4\x7c\xf6\x7e\x72\xda\x4c\x7b\xfb\x53\xde\x47\xed\x60\x3d\x6d\xf3\x71\x3c\xef\xdc\xc0\xb5\x37\xee\xa8\xc4\xe7\x49\x83\x3d\xce\xf5\x80\x88\x4c\xba\x9e\x7d\x64\xc0\xed\x68\xd4\xce\x78\xb8\xd3\x50\x84
\xc1\x02\xf3\x43\xa0\x6e\xde\x3c\x73\x65\xf7\x5e\x33\x39\x98\x2e\x81\x80\x34\xa7\x77\x20\xe4\x28\xcd\x81\x0b\x73\x5a\xa4\x2c\x08\x6e\xa4\xe6\xf9\x26\xfb\x94\xe4\x45\x77\xcf\x65\x06\xcc\x81\x92\x98\xdf\x61\x08\x8c\x03\x0a\xc1\x05\xc4\x28\xa5\x6b\x2c\x50\xa5\x82\x35\x8b\xe7\xf4\xf3\x76\x9b\xc9\xa7\x39\xe9\x43\x97\xe6\xc4\x5f\x76\x90\xe9\x76\x7c\xf1\xe1\x76\x50\xd2\x01\xa2\x94\xb6\x42\xb2\x9b\x76\xed\x2e\x2c\x42\x15\x36\x94\xb6\xa0\x33\x9d\xa8\x51\x6e\x3d\x83\xcf\xd3\x4a\xcd\xf2\x65\x1a\x39\xb3\x6f\x8e\xb0\xb1\x55\xed\x6c\x8a\x4d\xcf\x1a\xe4\x8a\x43\x46\xca\x5b\x7f\x07\x7a\x86\xed\x84\xc8\xaa\x9a\x96\x37\xa1\xc6\x17\x1f\x3c\x9b\x50\xe7\xe3\x8b\xf1\xbb\xc9\xd5\xae\xfd\xa7\xf1\xc5\x87\x01\x8c\x2f\xaf\x07\xf0\xe1\xe6\x7c\x00\x3f\x7d\xb8\xbc\x9c\x5c\x0d\xe0\xdd\x6c\xd6\x5f\x25\x3d\x43\x79\x5a\x8d\x2f\x3e\xf8\x7e\xbd\xbc\xf6\xfc\xfa\xe1\xe6\xdc\xf3\xab\x45\xce\xf3\xe2\xdd\x6c\x56\xfb\xd5\x9f\xac\xdc\x47\x89\xbd\xa1\x6e\xcf\x04\x65\xae\x51\xe3\xb2\x3a\xd8\xe8\x20\xf2\x79\xfd\x96\x53\x69\xd9\x41\x2a\xbd\x02\x58\x6c\x8b\x38\xa2\xac\x8e\xde\xfc\x2e\xe4\x27\xd1\x2a\xa7\xce\x88\xaa\x74\x75\xa7\xa4\x4a\x51\x86\x2d\xab\xd8\xbf\xfe\xe3\xb2\x72\xf0\xb8\xc1\x29\xcd\x9f\xac\xc2\x23\xbb\x8c\xa2\x74\xe7\x43\x01\xb4\x47\x81\x4a\x77\x8d\x73\xa5\xba\x39\xe2\x69\x98\xfd\xe9\xf8\x30\x6a\x62\x9a\xd7\x38\x37\x38\x69\xcf\x8b\xd7\xc6\xb7\x07\x3e\xf7\x0f\xc4\xae\x6c\xbf\x36\x16\xcd\x12\x7b\xe4\x47\xcb\x4c\xe4\x62\x32\xd7\xa5\x64\xc6\x80\x33\x17\x86\x7d\x81\xd1\x57\x93\xc2\xce\xf2\x0f\xa1\xe8\x92\x78\x53\x04\x5d\x8b\xbd\x46\x69\x81\x83\xb2\x4b\x55\xad\x14\x48\x20\x77\x84\x46\xe6\x72\x17\x5b\xb3\x6d\x7c\xa4\x5b\x53\x48\x85\x89\x2c\x0e\x35\x6b\x6a\xda\x12\x1c\x3b\xd7\x82\xfd\xd6\x81\xad\x6b\xc0\x1d\x87\xc4\x76\x4e\x42\x3b\x5f\x76\xa5\x3d\xc0\x9e\x06\x9c\x32\x89\x41\x6b\x59\x77\x86\xe0\x82\xf3\x08\x89\x7f\x37\xb8\x39\x57\x65\xb8\x2d\x5d\xaa\x41\xce\xa9\xf5\x76\xd2\xd6\x8c\x46\x12\x87\xf0\xa3\xf6\xac\xe6\xdf\x83\x4c\x15\xed\x51\x9a\xb4\xfd\x40\x1d\xd8\xaa\xd3\xc2\x98\x4a\x93\xdc\x09\x33\x6d\xca\x44\xd1
\x11\x75\x85\x31\x57\x1a\x5f\x6b\x70\x65\x4b\x85\x89\x7e\xb2\x93\xcc\xf9\x41\x9e\x81\x31\xbc\x9c\x45\x5b\xcd\x66\xc5\x03\x1e\xb9\xf2\x62\x25\x08\x93\x09\x17\xea\x28\x22\x5b\x4f\x70\x98\x3d\x86\x3d\x54\x6d\x0d\x55\xe6\x56\x1e\xbb\x1a\x7d\x77\x32\x37\x05\x49\x56\x26\x60\x85\x0c\xed\x22\x0d\x58\x1a\x2f\x50\x18\x5c\x5a\xa1\x96\x71\x1c\xfa\x6b\x4c\x56\xc1\x8e\x6c\x44\xc7\x91\xc5\x66\x02\x26\xf0\x27\x5f\xf6\x11\xdd\x76\x18\xcd\x4d\x58\x73\x58\x51\xdb\x7b\x98\x2b\x2e\xb4\x73\xcb\x26\xb5\x2d\xbb\xb0\x4b\x0b\xf4\xb3\x48\x83\x4f\xe8\xa9\x52\xab\x73\xa6\x33\x4f\xe3\xe1\xce\x5b\x03\xb7\xa3\x7d\x55\x0d\x6c\xf3\xbc\x00\xd6\x47\xad\x9d\x9b\x36\x5a\xed\xf3\x8e\xde\xa1\xb9\x7b\x2a\x2b\xe2\xbf\xb9\x3a\x3b\x86\xdb\xb5\x52\x89\x3c\x1e\x8d\xa4\x05\x35\x5c\x19\xf8\x24\xa1\x72\x18\xf0\x78\x14\x6f\x8f\x2c\x1b\x46\x4b\xce\x47\x0b\x22\x5e\xfc\xe3\xcd\x3f\xbf\xfe\xe6\xdb\xef\xda\x92\xed\x8e\x31\x6b\x7b\x0e\x3c\x45\xd8\x98\x15\xe9\x02\xe1\x36\x87\x75\xdb\x22\x83\x65\x6e\x75\x1d\x62\x2a\x9e\xb2\xc3\x77\xcc\x18\xed\x64\x6f\xbb\xcb\x2f\x3f\x85\x8a\xed\x96\x00\xca\x14\xfa\x96\x7c\xa5\x11\x5d\xbd\x37\x65\xea\xdb\xaf\xfb\x8b\xca\xbb\x1c\x89\xbe\xe2\x72\xee\x6c\x52\x5e\x39\x4c\x97\xd6\xa4\x7f\xa4\xce\xf6\xea\x60\xd4\x9a\xd3\x6e\x79\x69\x98\x98\xcf\x95\xc0\x3f\x52\xd6\xb2\x9e\xed\x18\x59\x8c\x1f\x5f\xbb\x67\xdd\x9e\xa8\xae\xdd\x17\xa5\x85\x43\x17\x67\x61\xdc\x66\x0e\xed\x93\xdb\x77\xf8\x79\x8d\x02\x7f\x81\x57\x19\xb7\x03\x0d\xce\xf1\xda\xf0\xd9\xb1\x7f\x14\xf2\x40\x8e\x18\x89\x29\x5b\xbd\xb0\xa3\x98\x1b\x6f\x5e\x7f\xe1\x66\xc4\x41\x68\x33\x22\x6d\x09\xef\xfd\xb2\xea\x9d\x49\xef\xd6\x95\xe0\x34\xcc\xa6\x32\xf3\x3f\x03\xd8\xac\xa9\x2b\xcd\x77\x51\x7c\xb7\x3a\xeb\x00\xc2\x56\xfa\xbb\x80\xb4\x28\x99\xd3\xfe\xd1\x24\x77\x72\xdb\x38\x2c\x22\x60\xa0\x1d\x30\x4b\x9d\x2b\x49\x1d\x73\xc3\x0d\xf5\x26\x5d\xc0\x64\x47\x7c\x99\xd1\x32\x33\xf7\x09\x0a\x6c\x60\xf5\xd9\x71\x41\x27\x98\x5a\x68\x60\x6c\x38\x0d\x1c\x29\x8f\x12\x14\x64\x41\xe1\xe3\x1b\x8e\x93\x5d\xe1\x66\x0f\x4b\x7f\x5b\x35\xf5\x66\x47\xe3\xd6\x18\xfb\x0e
\x4b\xa8\x9f\xf9\x0f\xe3\x37\xdf\x7c\x9b\x53\xa7\x97\x41\x6b\xbc\x77\x6e\x6b\xa0\xe3\xe7\x80\xc7\x7a\x45\x9c\x6d\x05\xec\x0c\x8e\xc1\xe6\xaf\xca\xca\x90\x5d\xc7\x54\x1a\xc6\xa6\x09\xed\x41\x34\x73\x76\x4c\x99\xdc\x23\x7b\xd9\xed\xc0\x8b\xf3\x2e\x45\xf4\xae\x97\x06\xc5\xd2\xad\xa2\x6d\xb0\x24\xb4\xe5\xde\x8b\xec\x59\xe0\x92\x0b\x04\x91\x32\x5b\x59\x59\x9c\xe4\x37\x6a\xd8\xce\xbd\xd6\x3a\x4a\xfb\x3c\x4c\x16\xda\x6a\x2a\xed\x53\x3b\x60\x73\x35\xb5\x57\x41\x39\x4b\xc3\x61\x89\x19\x77\x32\xe7\x31\xcd\xb6\x06\x3a\x79\x90\x15\x48\x9b\x3b\xa5\x94\xcd\xda\x98\x45\x8c\x99\x9a\x84\xa8\x75\xe9\x88\x63\x76\x4a\x87\x74\xfa\x50\x80\x5f\x33\x20\xbf\x1d\x8f\x46\xbf\x66\x5b\xda\xbf\x35\xac\xf7\x9f\xbe\x1d\xe3\x0c\xcd\x9e\xbb\x31\xa7\xc5\xce\x49\xbe\xdf\x90\x25\x6b\x0d\x20\xf0\xef\xd1\x78\x78\xd6\xd8\xb5\xc9\xb2\x10\x05\x64\x9b\x4c\xab\x5d\x3f\x56\xd9\x9c\xf1\x65\x91\x59\xf5\x4a\x86\x45\xaa\x60\xc3\xd9\xcb\x97\xc6\x7c\x58\x67\x63\x76\x0a\x18\x6e\xf2\x1b\xc0\x86\x70\x73\x79\x3a\xbe\x9e\x9c\xfa\x66\x77\x17\x12\xf5\x21\x2d\x43\xf2\x86\x1e\x88\x25\x2c\x48\x76\xe3\x9d\x43\x65\x60\x42\xd8\xda\x8f\x45\x51\x41\x3b\xcc\xea\xee\x25\x95\x25\x74\xec\xf6\x8f\xf5\x49\x77\x2e\x3d\x5b\xbd\x54\x22\xbf\xdf\xb7\x09\x37\x4f\x3c\xb0\xb0\xb2\xd9\xe0\x2e\x5e\xe3\x22\x73\xe0\xb9\xdf\x75\x36\xd3\x8e\xea\x81\x68\x45\xc4\x4c\xb8\xd6\xd7\xf2\x16\xdb\x7e\x1b\x6a\x1e\xd8\x5f\xd8\x16\x9b\x93\x91\xb9\x36\xaa\x9f\x95\xfb\x9b\x96\x00\x75\xeb\xe8\xd8\xde\x9d\xe7\x52\x7e\x8a\x7c\x42\xab\x62\x0e\x15\x6b\xcc\x4a\xda\x36\xb3\xfe\xc2\x9f\xa6\xa2\x0a\xa4\xe2\x89\x04\xbc\xc7\x20\x35\x1b\x3c\x36\x4e\xd3\xd2\x90\x6f\x9b\x39\xb1\xc8\x6f\xf3\x29\x3b\x23\x9f\x2b\x19\xb3\xad\x03\x63\xd1\x7b\x65\x2f\x93\x30\x57\x33\x6b\x0f\x67\xee\xc9\x33\xb7\x65\xa3\x32\x26\x05\x13\xf9\xda\xe4\x88\xf4\x68\x82\xb7\x68\xfe\x82\x04\x9f\x1e\x54\xa4\x65\x05\xd8\xcf\xdb\xa7\x91\x02\xed\x14\x8f\x1e\x59\x50\x7b\x54\x6c\x72\xaf\x04\x09\xba\x92\x00\xfb\xc7\xc1\xe3\x3a\xe8\x87\x86\xc4\x65\x12\xfb\x02\xad\x28\x86\x6b\x6d\xaf\x7e\x73\x67\xe3\x28\x73\xc6\xaa\x58
\x50\x76\xd6\x9a\xf5\x61\x26\x94\x12\xfe\xd3\xd6\xe5\x19\x3c\x38\x5a\x1a\xe7\xb0\xfb\x06\x4d\xd5\xcd\x3b\x1a\x16\xee\x24\xc2\x3b\xc2\x54\x11\x56\x76\x2c\xab\xc0\xdd\x13\xda\xb6\x57\x50\x1a\x59\x51\xd6\x33\x95\xb4\x2f\xed\xa7\x05\xf0\xbe\xc4\x9f\x66\xf3\x69\x6a\x3d\xac\x0c\xe4\xf3\xaf\xf8\x10\x4a\x89\xf7\xee\x84\xce\xe8\x56\x7b\xfa\x33\xca\xd2\x7b\xed\xf6\x6e\x4f\x8e\xcd\x0f\x3f\xda\xbb\x3f\xda\x39\xa2\x5a\x0f\x97\x7e\x0e\x2b\x5a\x4f\x98\xd6\x5a\x3f\x48\x9f\x3a\x8f\x9b\xda\xa7\x1a\x18\x56\x45\x4c\xb9\x82\x0d\xbb\x64\xc9\x58\x9d\x71\xbf\x7b\x6d\xd6\x08\x05\x1a\x27\x4b\xe1\xfd\xf8\x6c\x7a\x3a\xbe\x9e\xce\x2e\x06\x35\xff\x7d\xf2\xc3\xe4\xe4\xdf\x6d\x47\x4f\xed\x53\xed\x30\xb9\xf8\x7e\x76\x75\x32\x39\x9f\x5c\x5c\x7b\x61\x7d\xbc\x9c\xcd\xaf\xcb\xad\xba\x72\xb9\xed\x27\x57\xed\x73\xd4\xa0\xa5\xb3\x71\x41\x66\x67\x33\x0f\xd6\x7b\xb4\x2f\x91\xb6\xef\x28\x0d\xde\x78\x01\x84\xc9\xa7\xd5\xb4\x14\x10\x3c\x9e\x6b\x39\xad\x41\x7e\x0c\xcf\xd2\x13\x66\x45\xf8\x5d\x7b\x09\x04\x42\x5c\xd8\xfa\xcc\x3b\x4a\x0c\xe5\x07\x27\xd2\x44\x78\x87\x13\xd1\xfc\x3b\xe1\x49\xcb\x89\xd8\x87\x08\xca\xf7\x0e\xe2\x63\x08\xc8\x0e\x58\x15\x96\x9d\xf0\xc4\xdc\x26\x6c\x45\x82\x67\xb1\x86\xaf\xa8\xb4\xfc\x1c\x64\xe3\x49\x05\x18\x55\xe2\xc9\x42\xf2\x28\x55\x68\x13\x48\xb5\xcc\x40\x37\xed\x1c\x92\xd4\x16\x2d\x69\x79\xe9\xd8\xcb\xb9\x43\xb1\x11\xb4\x3d\x5b\x0d\x3d\x6b\x16\x3c\xf4\xcf\x32\xd8\x7d\xa9\xff\x71\x8d\x66\xcd\xa7\xb8\xdd\x78\xb3\x8b\x4b\xbd\x64\xd3\x3f\xe5\xa8\xda\xdb\xfd\xbb\xa7\xc2\xaa\xbe\xac\x5c\x09\x64\xb2\xb9\x79\x81\x97\x2d\x7d\x8f\x04\x92\x70\x6b\x21\xca\x8e\x02\xf8\x02\x66\x96\x79\xcd\xd0\x51\xc8\x72\x98\x06\x55\x2a\x21\xe0\x4c\xd2\x10\x85\xb9\x5f\x5b\xa6\x41\x80\xb2\x23\xa6\xd3\x4f\xb3\x62\xa3\xb5\x79\xe9\x8b\x43\x8f\x2f\xb3\x97\x05\xf0\xde\xf1\xda\x89\xa6\x56\xb3\xcf\x68\xaa\x40\x04\x1e\x28\x12\x41\x48\x57\x54\x49\x97\x5c\x15\xe8\x6e\x34\xee\x8e\xa8\x28\x03\x2e\x42\x57\x25\x5b\xf9\xb8\x92\x33\x03\x7c\xc3\xf4\x5b\x73\xbb\xaf\x2b\x1a\xd1\x32\xd3\x09\x34\x95\x28\x64\x5e\x40\x67\x26\xf1\x95\xa4
\x31\x8d\x88\x88\xb6\x59\x8a\x89\xa5\xb1\xd9\xf1\x88\x79\x68\xab\x9b\x76\xe1\xa9\x3b\x45\x26\x80\x0f\xd6\x31\x0f\x21\x55\x34\xa2\x6a\xfb\x7a\x08\x13\x12\xac\x2d\xf5\x05\xd9\xda\x74\x1b\xe6\x74\xe7\xd2\xa9\xca\xf6\x8c\xf3\xcf\x7a\x7d\x6d\x7e\x0d\xb8\x10\x28\x13\xce\xdc\x8d\xfe\xce\xbc\x91\xb0\xcc\xa4\x6e\xde\xea\x1e\x6f\x6a\xc0\x64\x06\xca\x6a\xd6\x82\xaa\x41\x2e\xcd\x9c\x99\x1f\x76\x24\xbe\x1b\x90\x6c\x7a\xa7\x3c\x79\xf9\xba\x28\x2f\xbf\xed\xe6\xad\x84\xef\xbe\xf9\x66\x08\x6f\x51\xdb\x00\x53\xa4\xc4\x63\xcc\xb6\x4d\x8d\x24\xd4\xbe\xba\x65\xef\xfc\xed\x04\x4a\xa4\xe4\x01\x35\x99\xd2\x6c\x31\xa0\xb9\x37\xb0\x84\x5b\xa2\x1d\xe6\xc7\xf0\x9d\xe5\x6c\xfb\x25\x86\xf6\xc9\xdb\x7f\x93\xb7\xb7\xe0\x8e\xe1\x5b\xfb\x0b\x37\x37\x9a\x7d\xed\x0f\xf4\xb5\x20\x4e\xee\x31\x78\xdc\xe0\x44\x43\x7c\xac\xe0\xa4\x03\x56\x2d\x1d\x62\x18\xe1\xd2\x21\xce\x3d\x73\x61\x8e\xe1\x45\x9d\x1e\xa8\x57\x7c\xa2\x7d\x01\x86\x93\x7b\xaa\x4e\x78\xd8\x5d\xd6\xb2\xeb\xbc\x9b\x97\x6d\xe3\xda\x00\xbd\xd3\x01\x25\xa3\xfd\xf3\x57\xbf\x94\x4b\x50\x93\x6c\xe5\x69\xcb\xf3\x3b\xc5\xc8\xca\x63\xa9\xcc\x58\xf0\x95\x20\xb1\xdb\xb0\xd6\xdd\x6d\x25\x6b\x48\x03\x9b\x1b\xef\xe1\x51\xf6\x38\x79\x57\xee\xd0\xe7\xfc\x9d\x7d\x3a\x4f\xe1\xd9\xa7\x6f\x85\x4f\xff\x1a\x9f\x86\x08\x77\x77\x20\x62\xf5\x3b\x08\x8b\x58\xf5\x16\x90\xb1\x58\xa5\xf6\x53\x47\x36\x83\x9e\x10\x29\x8b\x33\x61\xf9\xf6\xb3\xb5\x23\xe6\x03\x24\x4f\x62\x4a\x77\x06\x15\xfd\x0f\x59\xda\xe7\xcb\x5a\x81\xec\x5e\x77\xf4\x5a\x5b\x18\xe3\x77\x49\xd4\xfa\xf1\x69\x3a\xcb\x40\xef\x43\x52\x6d\x35\xb1\x2c\x82\x21\xb7\xb2\x30\x08\xef\x8e\xa9\xcd\xb1\x1a\x3f\xd9\xb1\xa4\xbf\x4f\x7a\xe6\xbc\x0a\xf8\x31\xfc\x5b\x3f\x90\x2d\xc9\x19\x06\xe7\xf3\xe9\x5f\xd7\xb1\x5d\x59\xaf\x13\xe8\x3e\xee\x4b\x43\x99\xf7\xc9\xbd\x54\xc7\x36\x6f\xf1\x54\x4e\xb1\x65\x3b\xd5\xce\x7d\x2d\xd3\x28\xda\xea\x28\xcf\x84\x84\xa9\xc8\xce\x68\x75\x57\x9b\x58\x57\xfb\x24\xcc\xe4\x9f\xea\xf9\xbe\x24\x83\xfa\x87\xa6\x74\x96\x11\xf9\x1d\xbc\xfe\xf7\x1a\xea\x3e\xd6\xd6\xa0\xa1\xa5\x35\x95\x68\x8f\x35\x56\x36\xcd
\xd1\x18\x90\xde\x12\xff\x7c\x44\x9f\xff\x02\xaf\xe8\x10\x87\xe5\x82\x48\x33\xca\xeb\xbf\x61\xd0\x20\x92\xf8\xf7\x71\x34\x57\x55\xc0\x8f\xe1\x68\xfa\x81\x6c\x75\x34\x22\x89\x8b\x6d\x00\xa3\x00\x49\x9c\xe5\x1a\x0e\x59\xdf\x26\xc2\x3b\x4c\x84\x1d\xf3\x2a\x7d\x44\x99\x99\x67\x20\x1f\x43\x5a\x76\x01\xab\x72\x2d\x65\x12\x02\x1e\xc7\x84\x85\xf6\x1a\x4b\x90\x6b\x8c\xa2\x43\x50\xd2\x0e\xf3\x10\x94\xfc\x59\x41\x89\x1e\x5a\x24\x02\x95\xef\x9e\x88\x3a\x9e\xfb\x9a\x9c\x69\x01\xbc\x4f\xa7\xbd\x54\xb1\x04\x7b\xaf\x42\x06\x53\xec\x69\xfe\x2e\xd3\x9e\x05\x05\x8a\x83\x48\xdd\x01\x5a\xd3\xa8\x7b\x87\x60\xba\x04\xc6\x2b\x70\x68\xe9\x0b\x69\x25\x30\xfa\x77\x97\x99\xec\x4e\x5f\xda\xea\xa4\x68\x9b\x9d\xb4\x88\xe8\x27\x8c\xb6\xf6\x10\xa6\x51\x87\xac\xa0\xd3\x80\x75\xc7\x31\x7f\x96\x6b\x5c\x90\x1d\x8e\x3c\xa2\x0c\xe5\x2f\xf9\x39\x1a\x64\xc3\x0d\xfd\x44\x13\x0c\x29\x19\x72\xb1\x1a\xe9\xbf\x46\x73\x0b\xe8\xe3\xab\x1b\x46\xef\x5f\x77\x46\x31\x9e\x4a\x8e\xe9\xc5\xf5\xe4\xea\xf2\x6a\x72\x5d\xbb\xaa\x01\x2e\x66\x17\x93\x01\xcc\x7f\x98\x9c\x9d\x75\xa7\xc3\x2f\x67\x3f\x4e\xae\x4c\xbb\xcf\xab\xc3\x68\xc1\xa4\xb3\x8f\x46\xb2\xb3\x81\xc1\xab\xb3\x45\x81\x7e\x6b\x33\x3b\x71\x8f\xaf\x6b\x56\x31\x1e\xe6\xda\x8d\x7f\xca\x24\xd5\x66\xc5\x32\x61\xad\x4b\xc0\x1f\x7b\x17\xc5\x4d\xe3\x0e\xaa\xe2\xc6\x85\xac\xba\xd5\x68\x47\x7b\x61\xb3\xd5\x10\xf7\xc9\x33\x7b\xb1\x45\xbd\xb6\xbb\xf4\x29\xcf\xa1\x41\xcf\x9c\xc2\xf6\x40\x2c\x6e\x54\x54\x3c\x43\x6a\x9b\x9f\xd9\xcc\x76\xd5\xd6\x18\x25\xf6\x46\x14\x6d\x11\x62\xca\xcc\x5a\xa3\x65\x43\xac\x74\x83\x16\x45\x69\xcf\x32\x15\x57\x66\xd9\x6b\xbb\x62\x24\xcc\x79\x57\x81\xff\xd1\x1d\x4a\x05\xda\x1e\x98\xf9\xb7\xb2\xb3\xd2\xee\x7c\xdb\xca\x9c\x90\xca\x3f\xee\x9c\x15\x9f\xdb\xb2\x77\x5b\xaa\x0d\x92\xfa\xf7\xb1\xcd\x67\x9e\x9b\x9f\xf5\xae\x20\x6c\x8e\x9a\x14\xdf\x92\xaf\xcb\x4e\xa9\x0c\xf7\xb3\x6a\xa5\x6f\x3a\xca\x79\x7b\x95\x4a\x1b\x44\xfe\x88\x42\x69\x57\x39\xef\xfb\x20\x29\x54\x07\x7f\x50\xc9\xb4\x4f\x44\x2b\x45\xd4\x0f\x2f\x99\xa6\x5d\x05\xc7\x4f\xa3\x66\xba\x4b\x4c\xfe\x1a
\x25\xd3\x25\x0a\x0f\x15\xd3\x5f\xe6\xd2\x16\xbe\xc0\x82\xa6\x43\xc5\xf4\xfe\xea\x74\x28\x98\x3e\x14\x4c\xef\x1c\xe5\xaf\x52\x30\x5d\x52\x84\x43\xbd\x74\x1b\x95\x7f\xe3\x7a\xe9\x92\x7c\x1c\xca\xa5\x3d\x84\xfe\xad\xa3\x8b\x43\xb9\xf4\xa1\x5c\xfa\x50\x2e\x7d\x28\x97\x3e\x94\x4b\x1f\xca\xa5\xff\x84\x72\xe9\x5a\x6c\x72\xa8\x96\xee\x24\xfb\x50\x2d\xdd\x6d\x6e\xfe\xec\xed\xd9\x43\xb5\xf4\x13\x28\x7c\x82\x2f\x6e\x01\x72\xa8\x96\x6e\x7b\x9e\x7a\xb5\x74\xc9\xbd\x1d\x8a\xa5\xdb\x09\x3d\xd4\x25\x3d\x79\xc7\xf7\x25\xd9\xd3\x43\xb1\xf4\xa1\x58\xfa\xef\x55\x2c\x5d\xf2\x33\x87\x5a\xe9\x1e\x14\xff\xad\x6b\xa5\x4b\xc2\x72\x28\x95\x6e\x25\xf6\x10\x92\x74\xd0\xf4\x04\x42\x92\xa7\x50\x2a\xed\xd3\xc4\x43\xa5\xf4\xa1\x52\xba\xed\x39\x54\x4a\x7b\x9e\xc7\xa9\x94\x76\xd5\xb5\x9f\x57\x2c\xfd\xde\x02\x69\x34\x6b\x44\xeb\x95\x8b\xa4\xb5\x6a\x55\xef\xb3\x1e\x66\x90\xf2\xcf\x69\x26\x1e\x06\x28\x0e\x5f\x43\x82\x82\xf2\x10\x24\x26\x44\x18\xd7\x62\x37\x73\x24\xbc\xc2\xe1\x6a\x08\xff\x78\x33\xfc\xe7\xd7\xc3\x6f\xbe\x1d\x7e\xf7\x3f\x25\xa1\xb7\x6e\xe8\x9a\x56\x8b\xc3\x5b\x28\xcd\xec\xb3\xee\x72\xa4\x68\xdc\xfc\xba\x5b\xad\xd4\xf7\xba\xda\x46\x20\x09\x67\x66\x57\xa2\xe6\x85\xaa\x1f\xa1\x4b\x55\x92\x2a\x63\x0f\x86\x70\x46\xa4\x02\x3d\x94\xe5\x4e\xc9\x70\xc2\x86\xc8\xcc\x8b\x56\xbf\x43\xd7\xf1\xfd\xbe\xff\x0f\x00\x00\xff\xff\x9f\x40\xf8\x38\x87\xb4\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x3d\x5b\x73\xdb\xb6\xd2\xef\xf9\x15\x3b\xc9\x43\x92\x19\x59\xea\xc9\xe9\xe5\x1b\x7f\x73\x1e\x14\x5b\x4d\x35\xc7\xb6\x3c\x96\x9d\x4e\xda\xe9\xc4\x10\xb9\x92\xf0\x85\x04\x78\x00\xd0\xb2\x4e\xa7\xff\xfd\x1b\x5c\x78\x07\x29\xca\x71\xdb\xb8\x15\x1f\xda\x58\x04\x16\xbb\x8b\xbd\x61\xb1\x00\x5f\xc0\x09\x4f\xb6\x82\xae\xd6\x0a\xde\x7c\xf5\xe6\x0d\xbc\xe3\x7c\x15\x21\x9c\x9d\x9d\x0c\x61\x1c\x45\x70\xa5\x5f\x49\xb8\x42\x89\xe2\x0e\xc3\xe1\xb3\x17\xf0\xec\x05\x9c\xd1\x00\x99\xc4\x10\x52\x16\xa2\x00\xb5\x46\x18\x27\x24\x58\x63\xf6\x66\x00\xef\x51\x48\xca\x19\xbc\x19\x7e\x05\xaf\x74\x83\xe7\xee\xd5\xf3\xd7\xff\xfb\xec\x05\x6c\x79\x0a\x31\xd9\x02\xe3\x0a\x52\x89\xa0\xd6\x54\xc2\x92\x46\x08\x78\x1f\x60\xa2\x80\x32\x08\x78\x9c\x44\x94\xb0\x00\x61\x43\xd5\xda\x0c\xe3\x80\x68\x3c\x3e\x38\x10\x7c\xa1\x08\x65\x40\x20\xe0\xc9\x16\xf8\xb2\xdc\x0e\x88\xb2\x18\xeb\x67\xad\x54\x72\x3c\x1a\x6d\x36\x9b\x21\x31\xd8\x0e\xb9\x58\x8d\x22\xdb\x52\x8e\xce\xa6\x27\x93\x8b\xf9\xe4\xe8\xcd\xf0\x2b\xdb\xe7\x86\x45\x28\x25\x08\xfc\x4f\x4a\x05\x86\xb0\xd8\x02\x49\x92\x88\x06\x64\x11\x21\x44\x64\x03\x5c\x00\x59\x09\xc4\x10\x14\xd7\x18\x6f\x04\x55\x94\xad\x06\x20\xf9\x52\x6d\x88\xc0\x67\x2f\x20\xa4\x52\x09\xba\x48\x55\x85\x5d\x19\x7e\x54\x56\x1a\x70\x06\x84\xc1\xf3\xf1\x1c\xa6\xf3\xe7\xf0\x76\x3c\x9f\xce\x07\xcf\x5e\xc0\x8f\xd3\xeb\x1f\x66\x37\xd7\xf0\xe3\xf8\xea\x6a\x7c\x71\x3d\x9d\xcc\x61\x76\x05\x27\xb3\x8b\xd3\xe9\xf5\x74\x76\x31\x87\xd9\xf7\x30\xbe\xf8\x00\xff\x9e\x5e\x9c\x0e\x00\xa9\x5a\xa3\x00\xbc\x4f\x84\xc6\x9f\x0b\xa0\x9a\x91\x76\xf6\xe6\x88\x15\x04\x96\xdc\x22\x24\x13\x0c\xe8\x92\x06\x10\x11\xb6\x4a\xc9\x0a\x61\xc5\xef\x50\x30\xca\x56\x90\xa0\x88\xa9\xd4\xd3\x29\x81\xb0\xf0\xd9\x0b\x88\x68\x4c\x15\x51\xe6\x97\x06\x51\xc3\x67\x94\x2d\xf9\xf1\x33\x00\x45\x55\x84\xc7\x30\x9b\x9f\x70\xb6\xa4\xab\xd1\xbb\x14\xa5\xba\xe4\x11\x0d\xb6\xcf\x00\x42\x94\x81\xa0\x89\x86\x72\x0c\xd7\x6b\xcc\xdb\x41\xa9\x1d\x08\x94\x3c\x15\x01\x3e\x03\xb8\x3f\x0a\x83\xe8\x48
\x2a\x91\x06\xea\x88\x91\x18\x8f\xa1\x0a\xd1\x36\x58\x13\x79\x44\x49\x7c\x0c\x4b\x12\x49\x7c\x96\x10\xb5\x96\x1a\x9b\x15\x2a\xfd\x3f\xcf\xc0\xcb\x94\x05\xfa\x2f\x2d\x8a\x66\x2e\x57\xa8\x25\x70\xc9\x45\x6c\x88\x04\xb2\xe0\xa9\x02\x52\x1b\x0e\x20\x21\x82\xc4\xa8\x50\x48\x0b\xf9\x08\x7c\x68\xe9\x27\x93\xa2\x63\x50\x22\x45\xf7\x63\x05\x91\x31\x2c\xd3\x28\x02\xca\xa4\x32\x32\xcf\x97\x8d\x01\xb5\xf8\x6d\xfb\xd2\x60\x1a\x7f\x89\x54\x84\x18\xa1\xc2\xbe\x64\xd8\xd6\x5f\x0a\xd6\xe3\x28\xda\x17\xf1\x28\xea\x8d\x7a\x22\xf8\xff\x61\xa0\xba\xd0\x96\xc1\x1a\x63\x72\xec\xfe\x02\x50\xdb\x04\x8f\x41\x5b\x10\xb6\x7a\x06\x10\x51\xd9\x5b\xc8\x75\x5b\x8f\x7c\xc4\x84\x6d\xff\x30\x8c\xb5\x8d\xe7\x0c\x99\x32\x70\x6d\x53\x37\x44\x09\x85\xac\xb3\xb3\x27\xcd\x19\xb6\x8a\x4f\xc3\x1c\x21\x39\xfa\xf5\x57\xf7\xcf\xdf\x7e\x1b\xad\xf2\x0e\x14\xf5\x1b\x8d\xfb\x6f\xbf\x55\xfa\x26\x44\x20\x53\x47\x01\x67\xda\x9b\xa0\xa8\x93\x56\x98\x96\x40\x20\x51\x58\x21\xd1\x6b\x77\xca\xaf\x04\x92\xf0\x48\xd1\x18\x79\xaa\x8e\xe1\xab\xca\x3b\xa3\xa7\x6d\x2f\xad\x10\x35\xdf\x5a\x26\xf2\x85\x8f\xf9\xee\x6f\x3b\x45\xf9\x1f\x55\x72\x12\xc1\x13\x14\x8a\xa2\x2c\x26\x86\x48\x49\x57\x2c\x46\xa6\x8a\xdf\xbc\x23\x15\xe8\xad\xb8\xb3\xc3\xe3\xbc\xaf\xaf\x8d\x85\x51\x9a\x36\x6f\xf3\x8a\xc4\xce\xad\x3f\x42\x69\x3c\xcb\xfb\x73\xfd\x7f\xa2\x80\x08\x74\x78\x6a\x19\xd6\x41\x43\x62\x00\x0e\xe1\x5a\xff\x41\xa2\x88\x6f\x64\x09\x26\x98\x48\x43\x71\x50\x44\x68\xa3\x2e\x51\x19\x8f\xb8\x12\x3c\x4d\xa4\xd6\x73\x0d\x7a\xb1\x85\x90\x2e\x97\xa8\x45\xa0\x24\xea\x20\xd3\x60\x0d\x44\x42\x44\x16\x18\xc9\x41\x05\xae\xa6\x5b\x0e\x60\x36\x1f\x68\x78\xff\xe5\x0c\xe5\x10\x26\x71\xa2\xb6\x25\x46\x4a\xd8\xd0\x28\xca\x06\x1f\x9f\x9d\x99\xe1\x8c\xc7\x64\x48\xd4\xba\x02\xb1\x42\x8e\xf6\x84\x11\x0d\x14\x9c\x13\x46\x56\xa8\x81\x41\x26\xc1\x96\x15\x78\xaf\xf5\x77\x4d\x57\xda\xd9\xa7\x89\x8e\x3f\xd4\x1a\x2b\x10\x33\xdf\x09\x6b\x8a\x82\x88\x60\xbd\x85\x57\x41\xc4\xa5\xf6\xd7\xdc\x30\x76\x26\x56\xaf\x2d\x8e\xda\xdb\x0b\x1a\xea\xe8\x80\x4b\x84\x88\x6f\x50\x40\xc8\x37
\xac\x02\x91\x9a\xf0\x4a\x98\xc0\x45\x47\x5c\x16\xc9\x21\x8c\x95\x8d\x22\x48\x8c\x10\xe1\x1d\x46\x0e\x1d\x2f\x0a\x14\x87\x15\xa0\x3a\xba\x33\x11\x9c\x93\xd1\xd7\x03\x0b\x0c\xc5\x1d\x35\xc1\x5f\x14\x41\x22\xf0\x4e\xf3\x40\xbf\x30\x1a\xa8\x0d\x16\x5f\x42\x9c\x46\x8a\x26\x11\x5a\xc6\x51\x94\x35\x96\x12\x95\x63\x69\xa3\x48\x24\xc1\x1a\xb8\x26\x62\x08\xd3\x8c\x1a\x2d\x56\x0d\x48\xb6\xb7\x0d\x8c\xb6\x35\xb0\x8e\xd4\xc0\xc6\x2b\xaf\x70\x35\x84\x84\x04\x9f\xc8\x0a\x8b\xe0\x0f\x04\x06\x34\xc1\x01\x08\x4c\xb8\xa4\x8a\x8b\xed\x00\x50\x05\x43\x4b\x5f\x05\x62\x85\x56\x64\x32\x15\x68\x87\x67\x1c\xde\x9f\x43\xc0\xd3\x28\x84\x84\x2b\x64\x8a\x92\x28\xd2\x61\x51\x80\xf4\x0e\x8d\xcb\xd2\xf1\x90\x0e\xc5\x2a\x10\x97\x82\xc7\xb0\xe0\x6a\x9d\xd3\x33\x84\x6b\x6e\x79\xb7\x8b\xd8\x42\x1b\x2a\x20\xef\x6c\x4c\x2f\xad\x7b\x74\xf4\x6a\xe9\x2f\x94\xc7\x32\x44\x9a\xb0\xb2\xf8\x75\x96\xa0\x20\x3a\x32\x86\xf9\x56\x2a\x8c\x6b\xaa\x64\xa6\xc4\x0a\x3e\xc4\xa9\x54\xb0\xd0\x08\xaa\xd4\x10\x8a\xf7\x41\x94\x4a\x4b\xaa\x66\x1b\x15\x4e\x9d\x34\x38\x12\x04\x5c\x84\xfa\x5f\x8a\x57\x40\x3a\x8d\xcd\xd4\xd3\xcc\x37\x04\x82\x2a\x14\x94\x0c\xe1\xb4\x81\xb0\x9e\x2f\x1a\x6a\xf6\x2e\x29\x86\x06\xff\x4c\x6e\xaa\x7c\xa5\xac\x44\xd8\x86\x6c\xe5\x10\x2e\x2d\x27\x1a\x40\x16\x5b\x87\xb0\xb6\x15\x3a\x72\x36\x82\xe3\xf8\x56\x01\x1a\x1b\x25\x17\xaf\xe4\x6b\xdd\x64\xeb\x28\xcc\x21\x17\x12\x44\x3b\x46\x49\x19\xfd\x4f\x5a\x85\x4b\x43\xd8\x58\x01\xcf\x17\x2e\x43\x98\xf3\x38\xc7\x22\x1b\x59\x42\xc8\xd9\x4b\x05\x6b\x72\xa7\x63\x2d\x0b\xaa\x18\x45\x54\x45\x8b\x8b\x1a\x46\x2c\x1b\x47\x4b\xd1\x4b\x6b\xb3\x03\xa2\xd7\x81\x8c\x3b\x60\x4c\xaf\x45\xa8\x84\x3b\x12\xd1\x90\x28\x0c\x2b\x20\x2d\x11\x99\x1a\x0c\xe1\x82\x2b\xa7\x00\x74\x09\xb3\x39\x4c\x99\xd6\x7e\x2e\xb6\x6e\xc5\xa4\x29\x09\x07\x40\xb4\x72\x18\x9d\xd1\x0b\xc9\x45\x95\xf8\xdc\x4f\x90\x4c\xb8\x0c\x40\xcb\x5b\x63\xf4\x67\x73\x58\x60\x40\xec\x02\xb4\x66\x70\x24\xda\x55\x69\x05\xe4\xfb\xf3\x97\x52\xf7\x22\xda\x88\x7f\x62\x7c\xc3\xca\x96\xcc\xe7\x54\xf5\x63\x9c\xcd\x99\x91\xc8\xea\x8b
\xcc\xbb\x12\x21\xc8\xb6\xf6\xa6\xe6\x60\xdf\x15\x30\x6a\x0d\xab\x81\x9e\x23\x2e\x8b\x64\x25\xc4\x44\x05\x6b\xa3\x2b\x0a\x22\x24\x52\x01\x67\xe8\x96\xc9\xda\xce\x6b\x90\x35\x88\x60\xdc\x64\xc5\xa5\xea\x65\x69\xe1\xd6\x4a\x0e\x35\xa4\x52\x3b\x4c\x85\xce\xa5\x0e\x8c\x74\xe0\x3d\x89\x93\x08\x1b\x70\x9f\x23\xbb\xfb\x57\x22\x78\xa8\x95\x52\xff\x5b\x2a\xb2\xa2\x6c\xf5\x7c\xe8\xa5\x5e\x22\x0b\x8f\x50\xfb\xd4\x4a\xb8\x55\x6d\xa4\xc3\x58\x17\x61\xe8\x7f\xd6\xda\x50\x6d\x6e\xea\x5c\x6f\x8d\x6a\x6a\xac\xef\x88\x5b\xda\xe7\xa3\x5d\x0e\xec\x13\x79\x05\xa1\x07\x5a\xfa\x21\x61\x48\xf5\x4c\x93\xe8\xb2\x73\x10\x68\x2e\x0d\x9a\x4f\x4d\xc4\x5a\xa8\x81\xba\x8c\xb9\x1c\xd1\x09\x8f\x93\x54\x21\x4c\xd8\x8a\x32\x2c\xd6\x4e\x96\x3e\xab\x6b\xce\x9a\xb7\x20\x98\x08\x94\x5a\x9a\xb4\xc4\x10\x56\x40\x50\x5c\x7b\x00\xca\x82\x28\x0d\x31\xb4\x76\x5f\x4b\x62\x21\x7f\x46\xd6\x86\x35\xbb\x6c\x7b\x5f\x90\x18\x2f\x05\x2e\xe9\x7d\x93\x33\xbd\xb5\x6d\xea\x01\xd6\x47\xed\xde\x9f\x97\x34\x6f\x63\x02\x29\x63\xfb\xa5\x22\xc2\x04\x83\x6a\x5d\x51\x3f\x8f\xe8\xd8\xc1\x86\x70\x46\x3f\x61\xee\xc6\x0c\xfd\x9a\x05\xcc\x7a\xb2\x0d\xd9\x9a\x34\x85\x66\x43\x7d\x50\x64\x4d\x69\xcf\x1d\xa6\x73\x77\x15\x2d\x75\x83\xfe\xeb\xb9\x56\xcc\xa3\xe7\x43\x98\xb1\x68\x0b\x32\x4d\x12\x2e\x94\xf5\x85\x3e\x11\xd7\x32\x7a\x64\x63\xbd\x3c\xc2\xf8\x53\xb4\xb8\x45\xc2\x6b\x5a\xec\x69\x95\x73\xed\xb3\xe5\xa4\x97\x70\xe8\x75\xb5\x4b\x4f\x16\xf3\xe5\xf2\x6e\x18\x0e\x0b\x60\xc6\xc3\xe7\x2f\x1a\x84\xe5\x0e\xff\xe6\x6a\x9a\x85\xd9\x7a\x11\x0f\xb7\x66\x15\x32\xfa\xf9\xa7\xd9\xc5\xe4\x97\x51\x3e\xc6\xe8\xe7\xe9\xc5\xfc\x7a\x7c\x71\x32\xf9\x78\x31\x3e\x9f\xfc\x72\x5b\x9f\x26\xc8\x87\x2e\x49\x0a\xd5\x2e\x2e\xe0\x71\x6c\x72\x92\xa1\xfe\xbb\x10\x09\xc5\x61\x49\x02\x1a\x51\x45\x8c\x9f\x6e\x0a\x72\x5c\x2c\x5b\xf8\x12\x82\x35\x61\x3a\x40\x72\x6e\xbe\xc8\xb3\x88\xf2\xda\x2c\x4b\x41\x96\x05\xba\x01\xd8\x44\x96\x5a\xea\x78\x62\xa0\x9b\xb0\x0a\xa5\xc6\xd9\x27\xb9\x2d\x92\xda\x00\xab\xc1\x18\x6b\x25\xd0\xc4\x76\x41\x79\x92\xdc\x02\xc5\x2e\xd0\x2c\xbc\x27\x26
\xea\x50\x4a\x45\x38\xfa\x3c\x83\x1c\xe5\xcb\xb5\xe3\xcc\xbc\x8f\x32\xd1\xf0\xd8\xf0\x25\xc5\x28\x3c\x06\x89\xd1\xf2\x8c\xb2\x4f\x95\x16\x5c\x5e\x6f\x93\xcf\xd0\xac\xd9\xdc\xf4\xdf\xdb\xe8\x76\x85\x3b\xb0\xe4\x3a\x98\xf1\xf1\x66\x36\x37\xa8\xc9\x61\x0d\x9a\x96\x08\x03\xd2\xe4\xf1\xb4\x64\x45\x3a\xe6\xce\x56\x12\xd6\x75\xc1\x8a\xde\x79\xac\xee\xcc\xb0\xa0\xe6\xcd\x9e\x5c\xa0\xe3\x9f\x88\x5d\x41\x0e\x97\x63\x11\xac\xa9\xc2\x40\xa5\x02\xbb\x82\x9d\xfe\xf1\xc9\x6c\x5e\x86\xb9\x3b\x4e\xf1\x3b\x65\xed\x82\x2b\x0b\x0a\x64\x66\x31\x61\x76\x36\x7c\x30\x41\x2f\x8a\xcc\x12\xb3\x2c\x41\x66\x15\x50\x42\xa7\x69\x53\x35\x13\xe6\x6b\x2e\x94\x0e\x22\x1e\x8b\x03\x39\xc0\x2f\x80\x7c\xa9\x71\x31\xf1\x4d\x35\x9c\x78\x1e\xe2\x82\x12\xf6\x1c\x3c\x71\x83\x7d\x9e\x6f\x28\x0b\xf9\x46\x36\xc2\x7e\xcb\x35\xb7\x77\xf8\x58\x3c\x73\xe0\xfe\x6c\x8e\x55\x78\xe7\x52\x29\x55\xfa\x8d\xfb\x7e\xb0\xc5\xfc\x49\xf7\xde\x6f\x6d\x48\x59\x29\x28\x91\x98\x65\x31\xcf\x90\xdc\x21\x18\x73\x54\xcf\xab\x40\x1e\x4d\x36\xc1\xe8\xde\x43\x8d\x06\x89\x7a\xc6\x11\x4d\xd0\xf5\xb8\xa2\x3d\x8e\x30\xa3\x3d\x1d\x27\x6c\xf3\x6e\xd7\xb4\x6a\x0a\x5a\x00\xda\x2d\x99\x63\x08\x89\xcb\xfd\xb7\x27\xde\x4f\x72\xb8\xa5\x36\x02\x49\xa8\x43\xa1\x06\xfd\x15\x79\x98\xa5\x2a\x49\xb5\x7f\x8c\xb6\x43\xd0\x10\x6c\x80\x53\xde\x7c\xdd\x10\xe9\x30\xaf\x38\xae\xfb\xa3\x4f\xe9\x02\x05\x43\x85\xf2\x88\xc6\x71\xaa\xb4\x46\xd4\x46\x2b\x8f\xb5\x9b\xe4\x1a\x59\xa7\x45\xe7\x36\xf4\x4b\x4d\x32\xff\x5e\x42\x5d\x4b\x31\x5b\xe9\xe5\x96\x7d\x55\xea\x0a\xb5\xa4\x8e\xd9\xcc\xb6\x41\xed\x3f\xbe\x7a\xf3\xb5\x16\x32\x41\x02\x85\xa2\xb4\x9e\x41\x45\x56\xfb\x53\x31\x51\x64\xb5\xf7\xb4\x5c\xaf\xd1\x0c\xe7\xb6\xe6\xab\x33\xe2\x72\xd6\x76\x35\x98\x08\x7e\x47\x43\x5b\x36\x90\x26\x5a\x58\xaa\x79\x55\xaa\xca\x01\x4c\x96\xe1\x42\xf1\x52\x9a\x01\xf6\x9e\x51\x46\x7a\x49\x6f\x8d\x09\x35\x6f\x55\xa1\xf5\xe5\x8d\x4d\x34\x9a\x45\xb2\x9b\xa9\x7c\xc7\xa0\x16\x71\x43\x2a\x0b\xa3\x2a\xe2\xaa\x76\xde\x16\xfb\x7e\xee\x5f\x1f\x69\xd8\xd8\xf8\x33\x7f\x7e\xb4\xc9\x40\xfd\xfe\x76\xf8\x32
\x87\xe2\x12\xa2\x57\xa5\xbc\x66\x93\xd6\xba\x19\xae\x91\x7a\xd9\x84\xd1\x46\xf9\x19\x95\xc6\xa2\x25\xf5\x0c\xef\xd6\xad\xd1\x53\xe1\xea\x2b\x8a\xed\x2e\xee\xf6\xc2\xbc\x8b\xd8\x61\x03\xad\x0e\x3b\xb8\xcb\x06\x7a\xec\x5f\x6b\x20\xd9\x1e\x44\x76\xb3\xa3\x2b\x84\x24\x89\x7a\x68\x28\xeb\xf6\x23\x93\x7d\xa2\x5d\x0f\xa2\x3e\x00\xd5\x92\x01\xa6\x07\x81\xbc\xcf\xb6\x19\xcb\xd8\x01\xf3\x6d\x04\xdf\x7a\x6b\xc5\x9b\xfe\xf5\x08\xb6\x69\xec\xf9\xf5\xbf\xdb\x24\xc1\x66\x48\x55\xdf\xf7\x2d\x77\x49\x05\xf5\xfc\x9a\x57\x1c\xf9\xa2\xa2\xee\xb8\xde\x84\xbc\x77\xa8\x97\x04\x8f\x13\x9e\x8d\x0b\x80\xdd\xcd\x7b\x4e\x5a\x09\xde\x84\x79\xd8\x08\x0d\x13\x64\x16\x69\x7c\x99\x91\x66\xaa\xd0\x64\x6e\x7c\x0a\xbd\x1c\x1a\xcb\x1c\xe2\x92\xa4\x91\x3f\x39\x0b\xb0\x40\x1d\xfa\x71\xa1\xad\xf3\xe9\xe4\xed\x10\x2e\xb9\x94\x74\x11\x21\xdc\x91\x28\x45\x79\x0c\xe3\xab\x93\x1f\xa6\xef\x27\x1f\xaf\x3f\x5c\x4e\x3e\xde\x5c\xcc\x2f\x27\x27\xd3\xef\xa7\x93\xd3\x41\x0b\xc4\xd3\xc9\xdb\x81\xfe\xcf\xc7\xf9\xd5\xc9\x4b\x6f\x1b\x64\x69\xec\x9f\x8b\xa3\xd6\xd1\x5a\x9a\x9f\x4e\xde\xb6\xbf\xd1\x18\x78\xde\x56\x2b\x38\x9a\x4f\x7b\xfc\x6a\x9f\x7a\x3c\x93\x83\xdb\x3d\x75\x57\x4e\xf2\x87\xb9\x35\x2d\x90\x29\xbc\xa7\xe8\xd0\x50\xfb\x9c\x6b\x27\xe9\x2a\x3f\xaa\x79\x03\x6d\x07\xfd\xbd\x7a\x85\x9a\xe5\xa6\xdd\x01\xa7\x7d\x5a\xc2\xce\x32\x23\x3b\x34\xab\x6f\x1e\x08\x2a\x06\xe0\x71\xd4\xf8\xb4\xcb\xa4\x40\xfb\xc4\x95\xfb\xd9\x00\x60\xe7\x8c\xad\x92\xd5\xbf\x71\xfb\x38\x68\xbf\x33\xb0\x76\x23\x7c\x73\x35\xcd\xc2\x93\x4f\xb8\xb5\x85\xaa\x3e\x01\x33\x26\x82\xac\xea\x1b\xe7\xc5\x13\x13\x6a\xc4\x4c\x02\xd1\x90\x84\xcb\x54\xdd\x8e\x50\x05\x23\x92\xa8\x91\x12\xa9\xd4\xd1\xf6\x2a\x59\x0d\xc3\x11\x97\x36\x18\xf8\x68\x60\x7e\xb4\x8b\x21\xf3\xf2\xb6\x65\x00\x27\xc6\x06\x6e\x14\x65\x18\xe7\x6b\x34\xe2\x12\x58\x26\x12\xca\x8a\x4d\x3c\xa0\x52\x41\x1f\x87\xc3\x37\x1e\x0f\xd4\x60\x6f\x21\x0f\x9a\xd1\x3d\x34\x77\xc5\xf9\xe7\xc5\x07\xef\x3c\x7e\x77\x2f\x57\xe3\x03\x50\x2b\x29\x7c\xc7\xf9\xe7\x86\x07\xc4\x13\x85\x3c\x62\x78\xc0\x7c\x89\x24\x1d\x35
\x34\x77\x84\xbb\xc3\x82\xfa\xca\xa0\x78\xf6\x95\x97\x7e\xb9\xad\x42\x60\xb4\xc2\x55\x57\x0f\x5d\xb6\x23\x15\xd1\x63\x89\x75\x93\x45\x3b\xb0\x4c\x45\xb4\x1b\xc9\xad\xcf\x97\xef\x23\xd8\x1f\x3c\xb2\xb1\x97\x60\xfb\x00\xd4\x04\xfb\x43\x1a\xff\x3e\x82\xed\x8f\x86\x1f\x20\xd8\xb4\x99\x5a\x3a\x82\x05\x91\xe8\x9b\xb7\x6e\xd1\x76\xbd\x1e\x47\x6c\xde\xb6\xa0\x00\xdd\xa2\x13\xf1\x80\x94\xf3\x1c\xa5\xc5\x62\x48\x05\x06\x6d\xe2\x1e\x52\x99\x44\x64\xfb\x78\x09\xe8\xd3\x02\xe0\x6e\x1a\x4c\xb8\x6c\x3b\xec\xa1\xa2\xd6\xbd\x3f\x4e\x30\x69\xdd\x7b\x8f\x48\xf2\xe6\x6a\x6a\xaa\xda\xde\x5d\xbe\x33\xee\xf2\x2f\x18\xf3\x51\x8f\xa2\xf4\x1a\xa2\xbe\xe5\xed\x4f\x7a\xb7\x48\xef\xd8\x84\xd1\x1b\x2e\xc2\x41\x56\x54\x66\x44\x61\x8f\xe8\xfc\xda\x65\xbb\xb4\xe8\xdc\xea\xe6\x40\xc3\xdb\x6c\xdf\x5b\x9b\x22\x57\x84\x69\x82\x32\xc2\x42\x20\x91\xb4\x99\x92\x5b\x27\x7d\x1f\xf5\x90\x6d\x01\x13\x5d\xd6\xda\xe9\xb1\x78\x4c\x95\xb2\xda\xa7\x07\x37\x59\x6b\x03\xd6\xd4\xce\x13\x8b\x8c\xa7\xde\xae\xcc\xee\xac\x74\xce\x94\x64\x40\xb0\xc6\xe0\x93\xdd\x06\x10\x95\xe8\xab\x28\x33\xac\x33\xc0\xda\xbd\xcf\x73\x07\x3f\xf9\x6d\xe7\x5e\x1e\xa1\x05\x46\xcd\x29\xd8\x56\x7f\xa4\x5f\xf0\x85\x41\x07\xa7\x70\x70\x0a\x3b\xd0\x3d\x38\x85\xd2\x38\x7f\x49\xa7\x60\x0d\x67\xab\x5f\x68\x01\x59\xf7\x02\xfb\xf8\x85\x16\x90\x4d\x6f\xd1\xdf\x2f\xb4\xaf\xef\xeb\xde\xc2\x6d\x21\x3c\x78\xbb\xa2\xf7\x1e\xc5\xce\x8d\x89\x27\xb0\x19\xd1\x77\x07\x22\x44\xa9\x85\x75\xae\x88\xf2\x98\xc9\x7e\x1b\xc1\xf9\x56\x66\x0e\x69\x7f\x47\x2c\xcb\xdd\xbd\x19\xed\x5a\x36\xdb\xee\x75\xea\x2e\x1f\x65\xbe\x8b\x6e\xd2\x47\x20\xd7\xe6\x18\x49\x96\x83\xca\xf5\xcd\x23\x6a\x6e\xca\x2b\x39\x6f\xa3\x6f\xbc\x38\x9d\x52\x1c\x22\xa0\xae\xa8\x20\x8a\xb4\x6e\x64\x39\x6f\x0f\xd8\x2c\x0b\x7e\x3a\x99\x4f\xaf\x26\xa7\x1f\xe7\xd7\xe3\xeb\x6a\x1a\x1c\x4c\xad\xe2\xd9\x99\xfe\xe7\xd5\xe4\x7c\xf6\x7e\x72\xda\x4c\x7b\xfb\x53\xde\x47\xed\x60\x3d\x6d\xf3\x71\x3c\xef\xdc\xc0\xb5\x37\xee\xa8\xc4\xe7\x49\x83\x3d\xce\xf5\x80\x88\x4c\xba\x9e\x7d\x64\xc0\xed\x68\xd4\xce\x78\xb8\xd3\x50\x84
\xc1\x02\xf3\x43\xa0\x6e\xde\x3c\x73\x65\xf7\x5e\x33\x39\x98\x2e\x81\x80\x34\xa7\x77\x20\xe4\x28\xcd\x81\x0b\x73\x5a\xa4\x2c\x08\x6e\xa4\xe6\xf9\x26\xfb\x94\xe4\x45\x77\xcf\x65\x06\xcc\x81\x92\x98\xdf\x61\x08\x8c\x03\x0a\xc1\x05\xc4\x28\xa5\x6b\x2c\x50\xa5\x82\x35\x8b\xe7\xf4\xf3\x76\x9b\xc9\xa7\x39\xe9\x43\x97\xe6\xc4\x5f\x76\x90\xe9\x76\x7c\xf1\xe1\x76\x50\xd2\x01\xa2\x94\xb6\x42\xb2\x9b\x76\xed\x2e\x2c\x42\x15\x36\x94\xb6\xa0\x33\x9d\xa8\x51\x6e\x3d\x83\xcf\xd3\x4a\xcd\xf2\x65\x1a\x39\xb3\x6f\x8e\xb0\xb1\x55\xed\x6c\x8a\x4d\xcf\x1a\xe4\x8a\x43\x46\xca\x5b\x7f\x07\x7a\x86\xed\x84\xc8\xaa\x9a\x96\x37\xa1\xc6\x17\x1f\x3c\x9b\x50\xe7\xe3\x8b\xf1\xbb\xc9\xd5\xae\xfd\xa7\xf1\xc5\x87\x01\x8c\x2f\xaf\x07\xf0\xe1\xe6\x7c\x00\x3f\x7d\xb8\xbc\x9c\x5c\x0d\xe0\xdd\x6c\xd6\x5f\x25\x3d\x43\x79\x5a\x8d\x2f\x3e\xf8\x7e\xbd\xbc\xf6\xfc\xfa\xe1\xe6\xdc\xf3\xab\x45\xce\xf3\xe2\xdd\x6c\x56\xfb\xd5\x9f\xac\xdc\x47\x89\xbd\xa1\x6e\xcf\x04\x65\xae\x51\xe3\xb2\x3a\xd8\xe8\x20\xf2\x79\xfd\x96\x53\x69\xd9\x41\x2a\xbd\x02\x58\x6c\x8b\x38\xa2\xac\x8e\xde\xfc\x2e\xe4\x27\xd1\x2a\xa7\xce\x88\xaa\x74\x75\xa7\xa4\x4a\x51\x86\x2d\xab\xd8\xbf\xfe\xe3\xb2\x72\xf0\xb8\xc1\x29\xcd\x9f\xac\xc2\x23\xbb\x8c\xa2\x74\xe7\x43\x01\xb4\x47\x81\x4a\x77\x8d\x73\xa5\xba\x39\xe2\x69\x98\xfd\xe9\xf8\x30\x6a\x62\x9a\xd7\x38\x37\x38\x69\xcf\x8b\xd7\xc6\xb7\x07\x3e\xf7\x0f\xc4\xae\x6c\xbf\x36\x16\xcd\x12\x7b\xe4\x47\xcb\x4c\xe4\x62\x32\xd7\xa5\x64\xc6\x80\x33\x17\x86\x7d\x81\xd1\x57\x93\xc2\xce\xf2\x0f\xa1\xe8\x92\x78\x53\x04\x5d\x8b\xbd\x46\x69\x81\x83\xb2\x4b\x55\xad\x14\x48\x20\x77\x84\x46\xe6\x72\x17\x5b\xb3\x6d\x7c\xa4\x5b\x53\x48\x85\x89\x2c\x0e\x35\x6b\x6a\xda\x12\x1c\x3b\xd7\x82\xfd\xd6\x81\xad\x6b\xc0\x1d\x87\xc4\x76\x4e\x42\x3b\x5f\x76\xa5\x3d\xc0\x9e\x06\x9c\x32\x89\x41\x6b\x59\x77\x86\xe0\x82\xf3\x08\x89\x7f\x37\xb8\x39\x57\x65\xb8\x2d\x5d\xaa\x41\xce\xa9\xf5\x76\xd2\xd6\x8c\x46\x12\x87\xf0\xa3\xf6\xac\xe6\xdf\x83\x4c\x15\xed\x51\x9a\xb4\xfd\x40\x1d\xd8\xaa\xd3\xc2\x98\x4a\x93\xdc\x09\x33\x6d\xca\x44\xd1
\x11\x75\x85\x31\x57\x1a\x5f\x6b\x70\x65\x4b\x85\x89\x7e\xb2\x93\xcc\xf9\x41\x9e\x81\x31\xbc\x9c\x45\x5b\xcd\x66\xc5\x03\x1e\xb9\xf2\x62\x25\x08\x93\x09\x17\xea\x28\x22\x5b\x4f\x70\x98\x3d\x86\x3d\x54\x6d\x0d\x55\xe6\x56\x1e\xbb\x1a\x7d\x77\x32\x37\x05\x49\x56\x26\x60\x85\x0c\xed\x22\x0d\x58\x1a\x2f\x50\x18\x5c\x5a\xa1\x96\x71\x1c\xfa\x6b\x4c\x56\xc1\x8e\x6c\x44\xc7\x91\xc5\x66\x02\x26\xf0\x27\x5f\xf6\x11\xdd\x76\x18\xcd\x4d\x58\x73\x58\x51\xdb\x7b\x98\x2b\x2e\xb4\x73\xcb\x26\xb5\x2d\xbb\xb0\x4b\x0b\xf4\xb3\x48\x83\x4f\xe8\xa9\x52\xab\x73\xa6\x33\x4f\xe3\xe1\xce\x5b\x03\xb7\xa3\x7d\x55\x0d\x6c\xf3\xbc\x00\xd6\x47\xad\x9d\x9b\x36\x5a\xed\xf3\x8e\xde\xa1\xb9\x7b\x2a\x2b\xe2\xbf\xb9\x3a\x3b\x86\xdb\xb5\x52\x89\x3c\x1e\x8d\xa4\x05\x35\x5c\x19\xf8\x24\xa1\x72\x18\xf0\x78\x14\x6f\x8f\x2c\x1b\x46\x4b\xce\x47\x0b\x22\x5e\xfc\xe3\xcd\x3f\xbf\xfe\xe6\xdb\xef\xda\x92\xed\x8e\x31\x6b\x7b\x0e\x3c\x45\xd8\x98\x15\xe9\x02\xe1\x36\x87\x75\xdb\x22\x83\x65\x6e\x75\x1d\x62\x2a\x9e\xb2\xc3\x77\xcc\x18\xed\x64\x6f\xbb\xcb\x2f\x3f\x85\x8a\xed\x96\x00\xca\x14\xfa\x96\x7c\xa5\x11\x5d\xbd\x37\x65\xea\xdb\xaf\xfb\x8b\xca\xbb\x1c\x89\xbe\xe2\x72\xee\x6c\x52\x5e\x39\x4c\x97\xd6\xa4\x7f\xa4\xce\xf6\xea\x60\xd4\x9a\xd3\x6e\x79\x69\x98\x98\xcf\x95\xc0\x3f\x52\xd6\xb2\x9e\xed\x18\x59\x8c\x1f\x5f\xbb\x67\xdd\x9e\xa8\xae\xdd\x17\xa5\x85\x43\x17\x67\x61\xdc\x66\x0e\xed\x93\xdb\x77\xf8\x79\x8d\x02\x7f\x81\x57\x19\xb7\x03\x0d\xce\xf1\xda\xf0\xd9\xb1\x7f\x14\xf2\x40\x8e\x18\x89\x29\x5b\xbd\xb0\xa3\x98\x1b\x6f\x5e\x7f\xe1\x66\xc4\x41\x68\x33\x22\x6d\x09\xef\xfd\xb2\xea\x9d\x49\xef\xd6\x95\xe0\x34\xcc\xa6\x32\xf3\x3f\x03\xd8\xac\xa9\x2b\xcd\x77\x51\x7c\xb7\x3a\xeb\x00\xc2\x56\xfa\xbb\x80\xb4\x28\x99\xd3\xfe\xd1\x24\x77\x72\xdb\x38\x2c\x22\x60\xa0\x1d\x30\x4b\x9d\x2b\x49\x1d\x73\xc3\x0d\xf5\x26\x5d\xc0\x64\x47\x7c\x99\xd1\x32\x33\xf7\x09\x0a\x6c\x60\xf5\xd9\x71\x41\x27\x98\x5a\x68\x60\x6c\x38\x0d\x1c\x29\x8f\x12\x14\x64\x41\xe1\xe3\x1b\x8e\x93\x5d\xe1\x66\x0f\x4b\x7f\x5b\x35\xf5\x66\x47\xe3\xd6\x18\xfb\x0e
\x4b\xa8\x9f\xf9\x0f\xe3\x37\xdf\x7c\x9b\x53\xa7\x97\x41\x6b\xbc\x77\x6e\x6b\xa0\xe3\xe7\x80\xc7\x7a\x45\x9c\x6d\x05\xec\x0c\x8e\xc1\xe6\xaf\xca\xca\x90\x5d\xc7\x54\x1a\xc6\xa6\x09\xed\x41\x34\x73\x76\x4c\x99\xdc\x23\x7b\xd9\xed\xc0\x8b\xf3\x2e\x45\xf4\xae\x97\x06\xc5\xd2\xad\xa2\x6d\xb0\x24\xb4\xe5\xde\x8b\xec\x59\xe0\x92\x0b\x04\x91\x32\x5b\x59\x59\x9c\xe4\x37\x6a\xd8\xce\xbd\xd6\x3a\x4a\xfb\x3c\x4c\x16\xda\x6a\x2a\xed\xd3\x28\x5c\x35\x37\x41\x39\x43\xc3\x61\x89\x19\x73\x32\xdf\x31\xcd\x76\x06\x3a\x59\x90\xd5\x47\x9b\x2b\xa5\x94\x4d\xda\x98\x35\x8c\x99\x99\x84\xa8\x75\xe9\x84\x63\x76\x48\x87\x74\x4f\xd4\xaf\x19\x8c\xdf\x8e\x47\xa3\x5f\xb3\x0d\xed\xdf\xea\xec\xfc\xd3\xf7\x62\x9c\x95\xd9\x73\x2b\xe6\xb4\xd8\x36\xc9\x37\x1b\xb2\x4c\xad\x01\x04\xfe\x0d\x1a\x0f\xcb\x1a\x5b\x36\x59\x0a\xa2\x80\x6c\x33\x69\xb5\xbb\xc7\x2a\x3b\x33\xbe\x14\x32\xab\xde\xc7\xb0\x48\x15\x6c\x38\x7b\xf9\xd2\xd8\x0e\xeb\x69\xcc\x36\x01\xc3\x4d\x7e\xfd\xd7\x10\x6e\x2e\x4f\xc7\xd7\x93\x53\x9f\x64\xef\x42\xa2\x3e\xa4\x65\x48\xde\xd0\x03\xb1\x84\x05\xc9\xae\xbb\x73\xa8\x0c\x4c\xfc\x5a\xfb\xb1\xa8\x28\x68\x87\x59\xdd\xba\xa4\xb2\x84\x8e\xdd\xfb\xb1\x0e\xe9\xce\xe5\x66\xab\x37\x4a\xe4\x97\xfb\x36\xe1\xe6\x59\x07\x16\x56\x76\x1a\xdc\xad\x6b\x5c\x64\xde\x3b\x77\xba\xce\x60\xda\x51\x3d\x10\xad\x88\x98\x09\xd7\xda\x5a\xde\x5f\xdb\x6f\x37\xcd\x03\xfb\x0b\xdb\x5f\x73\x32\x32\xd7\x16\xf5\xb3\x12\x7f\xd3\x12\xa0\x6e\x1d\x1d\xdb\x8b\xf3\x5c\xbe\x4f\x91\x4f\x68\x55\xcc\xa1\x62\x4d\x59\x49\xdb\x66\xd6\x59\xf8\x73\x54\x54\x81\x54\x3c\x91\x80\xf7\x18\xa4\x66\x77\xc7\x06\x69\x5a\x1a\xf2\x3d\x33\x27\x16\xf9\x55\x3e\x65\x4f\xe4\xf3\x23\x63\xb6\x75\x60\x2c\x7a\xaf\xec\x4d\x12\xe6\x5e\x66\xed\xde\xcc\x25\x79\xe6\xaa\x6c\x54\xc6\xa4\x60\x22\x5f\x9b\x04\x91\x1e\x4d\xf0\x16\xcd\x5f\x90\xe0\xd3\x83\x2a\xb4\xac\x00\xfb\x79\xfb\x34\xf2\x9f\x9d\xe2\xd1\x23\x05\x6a\xcf\x89\x4d\xee\x95\x20\x41\x57\x06\x60\xff\x20\x78\x5c\x07\xfd\xd0\x78\xb8\x4c\x62\x5f\xa0\x15\xc5\x70\xad\xed\xbd\x6f\xee\x60\x1c\x65\xce\x58\x15\xab\xc9\xce\x42
\xb3\x3e\xcc\x84\x52\xb6\x7f\xda\xba\x36\x83\x07\x87\x4a\xe3\x1c\x76\xdf\x88\xa9\xba\x73\x47\xc3\xc2\x9d\x44\x78\x47\x98\x2a\x62\xca\x8e\x35\x15\xb8\x4b\x42\xdb\x36\x0a\x4a\x23\x2b\xca\x7a\xe6\x91\xf6\xa5\xfd\xb4\x00\xde\x97\xf8\xd3\x6c\x3e\x4d\xa1\x87\x95\x81\x7c\xfe\x15\x1f\x42\x29\xeb\xde\x9d\xcd\x19\xdd\x6a\x4f\x7f\x46\x59\x7a\xaf\xdd\xde\xed\xc9\xb1\xf9\xe1\x47\x7b\xf1\x47\x3b\x47\x54\xeb\xc9\xd2\xcf\x61\x45\xeb\xf1\xd2\x5a\xeb\x07\xe9\x53\xe7\x59\x53\xfb\x54\x03\xc3\xaa\x88\x29\x57\xad\x61\xd7\x2b\x19\xab\x33\xee\x77\x2f\xcc\x1a\xa1\x40\xe3\x58\x29\xbc\x1f\x9f\x4d\x4f\xc7\xd7\xd3\xd9\xc5\xa0\xe6\xbf\x4f\x7e\x98\x9c\xfc\xbb\xed\xdc\xa9\x7d\xaa\x1d\x26\x17\xdf\xcf\xae\x4e\x26\xe7\x93\x8b\x6b\x2f\xac\x8f\x97\xb3\xf9\x75\xb9\x55\x57\x22\xb7\xfd\xd8\xaa\x7d\x8e\x1a\xb4\x74\x36\x2e\xc8\xec\x6c\xe6\xc1\x7a\x8f\xf6\x25\xd2\xf6\x1d\xa5\xc1\x1b\x2f\x80\x30\xf9\xb4\x9a\x96\x02\x82\xc7\x73\x2d\xa7\x35\xc8\x8f\xe1\x59\x7a\xc2\xac\x08\xbf\x6b\x2f\x81\x40\x88\x0b\x5b\x9c\x79\x47\x89\xa1\xfc\xe0\x44\x9a\x08\xef\x70\x22\x9a\x7f\x27\x3c\x69\x39\x0e\xfb\x10\x41\xf9\xde\x41\x7c\x0c\x01\xd9\x01\xab\xc2\xb2\x13\x9e\x98\xab\x84\xad\x48\xf0\x2c\xd6\xf0\x55\x94\x96\x9f\x83\x6c\x3c\xa9\x00\xa3\x4a\x3c\x59\x48\x1e\xa5\x0a\x6d\xfa\xa8\x96\x19\xe8\xa6\x9d\x43\x92\xda\x8a\x25\x2d\x2f\x1d\x1b\x39\x77\x28\x36\x82\xb6\xa7\xaa\xa1\x67\xc1\x82\x87\xfe\x59\x06\xbb\x2f\xf5\x3f\xae\xd1\xac\xf9\x14\xb7\xbb\x6e\x76\x71\xa9\x97\x6c\xfa\xa7\x1c\x55\x7b\xb5\x7f\xf7\x54\x58\xd5\x97\x95\xfb\x80\x4c\x2a\x37\xaf\xee\xb2\x75\xef\x91\x40\x12\x6e\x2d\x44\xd9\x51\xfd\x5e\xc0\xcc\xd2\xae\x19\x3a\x0a\x59\x0e\xd3\xa0\x4a\x25\x04\x9c\x49\x1a\xa2\x30\x97\x6b\xcb\x34\x08\x50\x76\xc4\x74\xfa\x69\x96\x6b\xb4\x36\x2f\x7d\x6e\xe8\xf1\x65\xf6\xb2\x00\xde\x3b\x5e\x3b\xd1\xd4\x6a\xf6\x19\x4d\x15\x88\xc0\x03\x45\x22\x08\xe9\x8a\x2a\xe9\x52\xab\x02\xdd\x75\xc6\xdd\x11\x15\x65\xc0\x45\xe8\x4a\x64\x2b\x5f\x56\x72\x66\x80\x6f\x98\x7e\x6b\xae\xf6\x75\x15\x23\x5a\x66\x3a\x81\xa6\x12\x85\xcc\xab\xe7\xcc\x24\xbe\x92\x34\xa6\x11\x11
\xd1\x36\x4b\x31\xb1\x34\x36\xdb\x1d\x31\x0f\x6d\x69\xd3\x2e\x3c\x75\xa7\xc8\x04\xf0\xc1\x3a\xe6\x21\xa4\x8a\x46\x54\x6d\x5f\x0f\x61\x42\x82\xb5\xa5\xbe\x20\x5b\x9b\x6e\xc3\x9c\xee\x44\x3a\x55\xd9\x86\x71\xfe\x4d\xaf\xaf\xcd\xaf\x01\x17\x02\x65\xc2\x99\xbb\xce\xdf\x99\x37\x12\x96\x99\xd4\xcd\x5b\xdd\xe3\x4d\x0d\x98\xcc\x40\x59\xcd\x5a\x50\x35\xc8\xa5\x99\x33\xf3\xc3\x8e\xb4\x77\x03\x92\x4d\xef\x94\x27\x2f\x5f\x17\xe5\xb5\xb7\xdd\xbc\x95\xf0\xdd\x37\xdf\x0c\xe1\x2d\x6a\x1b\x60\x2a\x94\x78\x8c\xd9\x9e\xa9\x91\x84\xda\x27\xb7\xec\x85\xbf\x9d\x40\x89\x94\x3c\xa0\x26\x53\x9a\x2d\x06\x34\xf7\x06\x96\x70\x4b\xb4\xc3\xfc\x18\xbe\xb3\x9c\x6d\xbf\xc1\xd0\x3e\x79\xfb\x6f\xf2\xf6\x16\xdc\x31\x7c\x6b\x7f\xe1\xe6\x3a\xb3\xaf\xfd\x81\xbe\x16\xc4\xc9\x3d\x06\x8f\x1b\x9c\x68\x88\x8f\x15\x9c\x74\xc0\xaa\xa5\x43\x0c\x23\x5c\x3a\xc4\xb9\x67\x2e\xcc\x19\xbc\xa8\xd3\x03\xf5\x8a\x4f\xb4\x2f\xc0\x70\x72\x4f\xd5\x09\x0f\xbb\x6b\x5a\x76\x1d\x76\xf3\xb2\x6d\x5c\x1b\xa0\x77\x3a\xa0\x64\xb4\x7f\xfe\xea\x97\x72\xfd\x69\x92\xad\x3c\x6d\x6d\x7e\xa7\x18\x59\x79\x2c\xd5\x18\x0b\xbe\x12\x24\x76\xbb\xd5\xba\xbb\x2d\x63\x0d\x69\x60\x73\xe3\x3d\x3c\xca\x1e\xc7\xee\xca\x1d\xfa\x1c\xbe\xb3\x4f\xe7\x11\x3c\xfb\xf4\x2d\xef\xe9\x5f\xe0\xd3\x10\xe1\xee\x0e\x44\xac\x7e\x07\x61\x11\xab\xde\x02\x32\x16\xab\xd4\x7e\xe7\xc8\x66\xd0\x13\x22\x65\x71\x20\x2c\xdf\x7b\xb6\x76\xc4\x7c\x7d\xe4\x49\x4c\xe9\xce\xa0\xa2\xff\x09\x4b\xfb\x7c\x59\x2b\x90\xdd\xeb\x8e\x5e\x6b\x0b\x63\xfc\x2e\x89\x5a\x3f\x3e\x4d\x67\x19\xe8\x7d\x48\xaa\xad\x26\x96\x45\x30\xe4\x56\x16\x06\xe1\xdd\x31\xb5\x39\x53\xe3\x27\x3b\x96\xf4\xf7\x49\xcf\x9c\x57\x01\x3f\x86\x7f\xeb\x07\xb2\x25\x39\xc3\xe0\x7c\x3e\xfd\xeb\x3a\xb6\x2b\xeb\x75\x02\xdd\xc7\x7d\x66\x28\xf3\x3e\xb9\x97\xea\xd8\xe6\x2d\x9e\xca\x11\xb6\x6c\xa7\xda\xb9\xaf\x65\x1a\x45\x5b\x1d\xe5\x99\x90\x30\x15\xd9\x01\xad\xee\x52\x13\xeb\x6a\x9f\x84\x99\xfc\x53\x3d\xdf\x97\x64\x50\xff\xd0\x94\xce\x32\x22\xbf\x83\xd7\xff\x5e\x43\xdd\xc7\xda\x1a\x34\xb4\xb4\xa6\x12\xed\x99\xc6\xca\xa6\x39\x1a\x03\xd2
\x5b\xe2\x9f\x8f\xe8\xf3\x5f\xe0\x15\x1d\xe2\xb0\x5c\x0d\x69\x46\x79\xfd\x37\x0c\x1a\x44\x12\xff\x3e\x8e\xe6\xaa\x0a\xf8\x31\x1c\x4d\x3f\x90\xad\x8e\x46\x24\x71\xb1\x0d\x60\x14\x20\x89\xb3\x5c\xc3\x21\xeb\xdb\x44\x78\x87\x89\xb0\x63\x5e\xa5\x8f\x28\x33\xf3\x0c\xe4\x63\x48\xcb\x2e\x60\x55\xae\xa5\x4c\x42\xc0\xe3\x98\xb0\xd0\xde\x61\x09\x72\x8d\x51\x74\x08\x4a\xda\x61\x1e\x82\x92\x3f\x2b\x28\xd1\x43\x8b\x44\xa0\xf2\x5d\x12\x51\xc7\x73\x5f\x93\x33\x2d\x80\xf7\xe9\xb4\x97\x2a\x96\x60\xef\x55\xc8\x60\x8a\x3d\xcd\xdf\x65\xda\xb3\xa0\x40\x71\x10\xa9\x3b\x3d\x6b\x1a\x75\xef\x10\x4c\x97\xc0\x78\x05\x0e\x2d\x7d\x1e\xad\x04\x46\xff\xee\x32\x93\xdd\xe9\x4b\x5b\x9d\x14\x6d\xb3\x63\x16\x11\xfd\x84\xd1\xd6\x9e\xc0\x34\xea\x90\x15\x74\x1a\xb0\xee\x2c\xe6\xcf\x72\x8d\x0b\xb2\xc3\x91\x47\x94\xa1\xfc\x25\x3f\x44\x83\x6c\xb8\xa1\x9f\x68\x82\x21\x25\x43\x2e\x56\x23\xfd\xd7\x68\x6e\x01\x7d\x7c\x75\xc3\xe8\xfd\xeb\xce\x28\xc6\x53\xc9\x31\xbd\xb8\x9e\x5c\x5d\x5e\x4d\xae\x6b\xf7\x34\xc0\xc5\xec\x62\x32\x80\xf9\x0f\x93\xb3\xb3\xee\x74\xf8\xe5\xec\xc7\xc9\x95\x69\xf7\x79\x75\x18\x2d\x98\x74\xf6\xd1\x48\x76\x36\x30\x78\x75\xb6\x28\xd0\x6f\x6d\x66\x27\xee\xf1\x75\xcd\x2a\xc6\xc3\x5c\xbb\xf1\x4f\x99\xa4\xda\xac\x58\x26\xac\x75\x09\xf8\x63\x2f\xa2\xb8\x69\x5c\x40\x55\x5c\xb7\x90\x55\xb7\x1a\xed\x68\x2f\x6c\xb6\x1a\xe2\xbe\x77\x66\x6f\xb5\xa8\xd7\x76\x97\xbe\xe3\x39\x34\xe8\x99\x23\xd8\x1e\x88\xc5\x75\x8a\x8a\x67\x48\x6d\xf3\x03\x9b\xd9\xae\xda\x1a\xa3\xc4\x5e\x87\xa2\x2d\x42\x4c\x99\x59\x6b\xb4\x6c\x88\x95\xae\xcf\xa2\x28\xed\x41\xa6\xe2\xbe\x2c\x7b\x67\x57\x8c\x84\x39\xef\x2a\xf0\x3f\xba\x43\xa9\x40\xdb\x03\x33\xff\x50\x76\x56\xda\x9d\x6f\x5b\x99\xe3\x51\xf9\x97\x9d\xb3\xe2\x73\x5b\xf6\x6e\x4b\xb5\x41\x52\xff\x3e\xb6\xf9\xc6\x73\xf3\x9b\xde\x15\x84\xcd\x39\x93\xe2\x43\xf2\x75\xd9\x29\x95\xe1\x7e\x56\xad\xf4\x4d\x47\x39\x6f\xaf\x52\x69\x83\xc8\x1f\x51\x28\xed\x2a\xe7\x7d\x5f\x23\x85\xea\xe0\x0f\x2a\x99\xf6\x89\x68\xa5\x88\xfa\xe1\x25\xd3\xb4\xab\xe0\xf8\x69\xd4\x4c\x77\x89\xc9\x5f\xa3\x64\xba\x44
\xe1\xa1\x62\xfa\xcb\x5c\xda\xc2\x17\x58\xd0\x74\xa8\x98\xde\x5f\x9d\x0e\x05\xd3\x87\x82\xe9\x9d\xa3\xfc\x55\x0a\xa6\x4b\x8a\x70\xa8\x97\x6e\xa3\xf2\x6f\x5c\x2f\x5d\x92\x8f\x43\xb9\xb4\x87\xd0\xbf\x75\x74\x71\x28\x97\x3e\x94\x4b\x1f\xca\xa5\x0f\xe5\xd2\x87\x72\xe9\x43\xb9\xf4\x9f\x50\x2e\x5d\x8b\x4d\x0e\xd5\xd2\x9d\x64\x1f\xaa\xa5\xbb\xcd\xcd\x9f\xbd\x3d\x7b\xa8\x96\x7e\x02\x85\x4f\xf0\xc5\x2d\x40\x0e\xd5\xd2\x6d\xcf\x53\xaf\x96\x2e\xb9\xb7\x43\xb1\x74\x3b\xa1\x87\xba\xa4\x27\xef\xf8\xbe\x24\x7b\x7a\x28\x96\x3e\x14\x4b\xff\xbd\x8a\xa5\x4b\x7e\xe6\x50\x2b\xdd\x83\xe2\xbf\x75\xad\x74\x49\x58\x0e\xa5\xd2\xad\xc4\x1e\x42\x92\x0e\x9a\x9e\x40\x48\xf2\x14\x4a\xa5\x7d\x9a\x78\xa8\x94\x3e\x54\x4a\xb7\x3d\x87\x4a\x69\xcf\xf3\x38\x95\xd2\xae\xba\xf6\xf3\x8a\xa5\xdf\x5b\x20\x8d\x66\x8d\x68\xbd\x72\x91\xb4\x56\xad\xea\x7d\xd6\xc3\x0c\x52\xfe\x2d\xcd\xc4\xc3\x00\xc5\xe1\x6b\x48\x50\x50\x1e\x82\xc4\x84\x08\xe3\x5a\xec\x66\x8e\x84\x57\x38\x5c\x0d\xe1\x1f\x6f\x86\xff\xfc\x7a\xf8\xcd\xb7\xc3\xef\xfe\xa7\x24\xf4\xd6\x0d\x5d\xd3\x6a\x71\x78\x0b\xa5\x99\x7d\xd6\x5d\x8e\x14\x8d\x9b\x9f\x76\xab\x95\xfa\x5e\x57\xdb\x08\x24\xe1\xcc\xec\x4a\xd4\xbc\x50\xf5\x0b\x74\xa9\x4a\x52\x65\xec\xc1\x10\xce\x88\x54\xa0\x87\xb2\xdc\x29\x19\x4e\xd8\x10\x99\x79\xd1\xea\x47\xe8\x3a\x3e\xde\xf7\xff\x01\x00\x00\xff\xff\xc5\xdd\xf4\xec\x84\xb4\x00\x00"), }, "/osconfig/beta/os_policy_assignment.yaml": &vfsgen۰CompressedFileInfo{ name: "os_policy_assignment.yaml", @@ -814,9 +814,9 @@ var Assets = func() http.FileSystem { "/run/alpha/service.yaml": &vfsgen۰CompressedFileInfo{ name: "service.yaml", modTime: time.Time{}, - uncompressedSize: 43926, + uncompressedSize: 43917, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x73\x1b\x37\xb2\xe8\x77\xff\x8a\x2e\x7b\x6f\x31\xd9\x22\x29\xd9\x9b\x4d\xf6\xea\x9e\xfd\xc0\x50\x63\x9b\x1b\x89\xd4\x25\x29\xe7\xf8\x9e\x3a\x45\x81\x33\x20\x89\xf5\x0c\x30\x06\x30\x92\xb8\x29\xff\xf7\x5b\x78\xcc\x1b\xf3\xa0\xe4\x24\xbb\x5b\x67\xf2\x21\x32\x07\xd3\x68\x34\x1a\xdd\x8d\x7e\x00\xaf\x60\xca\xe2\x23\x27\xfb\x83\x84\x37\xe7\x6f\xde\xc0\x3b\xc6\xf6\x21\x86\xab\xab\xe9\x18\x26\x61\x08\x4b\xf5\x4a\xc0\x12\x0b\xcc\xef\x71\x30\x7e\xf1\x0a\x5e\xbc\x82\x2b\xe2\x63\x2a\x70\x00\x09\x0d\x30\x07\x79\xc0\x30\x89\x91\x7f\xc0\xe9\x9b\x21\x7c\xc0\x5c\x10\x46\xe1\xcd\xf8\x1c\xbe\x51\x0d\x5e\xda\x57\x2f\xbf\xfd\x3f\x2f\x5e\xc1\x91\x25\x10\xa1\x23\x50\x26\x21\x11\x18\xe4\x81\x08\xd8\x91\x10\x03\x7e\xf4\x71\x2c\x81\x50\xf0\x59\x14\x87\x04\x51\x1f\xc3\x03\x91\x07\xdd\x8d\x05\xa2\xf0\xf8\x68\x41\xb0\xad\x44\x84\x02\x02\x9f\xc5\x47\x60\xbb\x62\x3b\x40\xd2\x60\xac\x9e\x83\x94\xf1\xc5\xd9\xd9\xc3\xc3\xc3\x18\x69\x6c\xc7\x8c\xef\xcf\x42\xd3\x52\x9c\x5d\xcd\xa6\xde\x7c\xe5\x8d\xde\x8c\xcf\xcd\x37\xb7\x34\xc4\x42\x00\xc7\x9f\x13\xc2\x71\x00\xdb\x23\xa0\x38\x0e\x89\x8f\xb6\x21\x86\x10\x3d\x00\xe3\x80\xf6\x1c\xe3\x00\x24\x53\x18\x3f\x70\x22\x09\xdd\x0f\x41\xb0\x9d\x7c\x40\x1c\xbf\x78\x05\x01\x11\x92\x93\x6d\x22\x4b\xe4\x4a\xf1\x23\xa2\xd4\x80\x51\x40\x14\x5e\x4e\x56\x30\x5b\xbd\x84\x1f\x27\xab\xd9\x6a\xf8\xe2\x15\xfc\x3c\x5b\xbf\x5f\xdc\xae\xe1\xe7\xc9\x72\x39\x99\xaf\x67\xde\x0a\x16\x4b\x98\x2e\xe6\x97\xb3\xf5\x6c\x31\x5f\xc1\xe2\x2d\x4c\xe6\x1f\xe1\xa7\xd9\xfc\x72\x08\x98\xc8\x03\xe6\x80\x1f\x63\xae\xf0\x67\x1c\x88\x22\xa4\x99\xbd\x15\xc6\x25\x04\x76\xcc\x20\x24\x62\xec\x93\x1d\xf1\x21\x44\x74\x9f\xa0\x3d\x86\x3d\xbb\xc7\x9c\x12\xba\x87\x18\xf3\x88\x08\x35\x9d\x02\x10\x0d\x5e\xbc\x82\x90\x44\x44\x22\xa9\x7f\xa9\x0d\x6a\xfc\x82\xd0\x1d\xbb\x78\x01\x20\x89\x0c\xf1\x05\x2c\x13\x7a\xb6\xc2\xfc\x9e\xf8\xf8\x05\x40\x80\x85\xcf\x49\xac\x3e\xbe\x80\xf5\x01\xab\xd7\x60\x5f\x03\xc7\x82\x25\x5c\xb7\x7b\x1c\x05\x7e\x38\x12\x92\x27\xbe
\x1c\x51\x14\xe1\x0b\xc8\x81\x98\x97\x07\x24\x46\x04\x45\x17\x20\x79\x82\x5f\xc4\x48\x1e\x84\xea\x76\x8f\xa5\xfa\x9f\xa3\xab\x5d\x42\x7d\xf5\x2f\xc5\x73\x7a\xd2\xf6\x58\xb1\xda\x8e\xf1\x48\x8f\x06\xd0\x96\x25\x12\x50\xa1\x27\x80\x18\x71\x14\x61\x89\xb9\x30\x50\x47\x50\xc5\x46\x3d\x29\x9b\x58\x64\xcc\x8f\x25\x04\x26\xb0\x4b\xc2\x10\x08\x15\x52\x33\x35\xdb\x95\x3a\x52\xbc\x75\xec\x8b\xb7\x6e\xfc\xcf\x82\x79\x80\x43\x2c\x71\x5f\xd4\x4d\xeb\xdf\x13\xd3\x49\x18\x9e\x8a\x6c\x18\xf6\x42\x37\xe6\xec\xef\xd8\x97\x6d\xe8\x0a\xff\x80\x23\x74\x61\xff\x05\x20\x8f\x31\xbe\x00\x25\x02\xe8\xbe\x04\x2b\x64\xbe\x9e\xd9\x67\x00\x0b\x89\xe8\xbd\x12\x54\x5b\x07\x43\x45\x88\x1e\xff\xf5\x86\xae\x54\x07\xa3\x98\x4a\x8d\xa4\x69\x6a\xf1\xb5\x63\x49\x3f\xb4\x22\xaa\xcc\x67\x46\xba\x90\x20\x1b\x95\x38\xfb\xe5\x17\xfb\xe7\x97\x2f\x67\x29\x7a\xea\xd7\xf4\xef\x2f\x5f\xce\x84\x81\xa1\x7e\x55\xc3\xf8\xf2\xa5\x04\x2d\x46\x1c\x53\x39\xf2\x19\x55\xea\x0a\xf3\x2a\xc5\x72\x89\xe6\x73\x8c\x24\x2e\x0d\xd6\x25\xee\x8a\x6f\x38\x46\xc1\x48\x92\x08\xb3\x44\x5e\xc0\x79\xe9\x9d\x16\x15\x4d\x2f\x0d\x7f\xd7\xdf\x1a\x6a\xb2\xad\x6b\x4a\xed\xbf\xcd\x64\x65\xff\x90\x38\x8a\x43\x24\xf3\x1f\xca\xc3\x1b\x55\x27\x35\xe6\x2c\xc6\x5c\x12\x2c\xf2\x39\x44\x94\x32\xab\x56\xf2\x1f\x9d\xc8\xe8\xd6\x41\x40\x54\x5b\x14\xde\x38\x60\x39\x19\xac\x38\xf4\x3d\xb3\x5a\x65\x92\xf7\x5a\x68\x54\x5a\x31\x83\x5b\x6a\x14\x51\xa2\x0c\x81\x4f\xf8\x08\xf7\x28\x4c\x30\x44\x28\x06\x79\x40\x52\x1b\x22\x5b\x0c\x02\x4b\x65\x26\xe0\x47\x89\x39\x45\x21\x48\xc6\x42\x51\x46\x89\x81\x90\x8c\x63\xa5\x48\x01\xf1\x2d\x91\x1c\xf1\x23\x44\x58\xa2\x00\x49\x34\x56\x8b\xf3\x08\x88\x63\x6d\x1a\x7d\x4e\x30\x3f\x6a\x73\x43\x35\x17\x07\x96\x84\x01\x6c\x71\x09\xa2\xd2\xf2\xda\x38\x83\x87\x03\xa6\x10\xb1\x80\xec\x8e\x4a\x6d\x1b\x82\x89\x31\x4c\x43\x96\x04\x5a\xcd\x3e\x90\x30\x84\x98\xc5\x89\x9a\x29\x10\x2c\xc2\x45\x9a\x97\xc0\x26\x42\xc1\x18\x0c\x78\x42\xc7\x7b\x6d\x15\xa2\x98\x88\xb1\xcf\xa2\xc1\x40\xd9\x14\x83\x81\xe6\x77\xba\x1f\x7f\xa2\x48\x92\x7b\x3c\x0e\xf0\xfd
\x60\xa0\x99\x42\xc4\xc8\xc7\x42\x8d\x85\x94\x81\xee\x08\x0e\x03\xd8\xb1\x30\x64\x0f\x02\x7e\x4a\xb6\x98\x53\x2c\xb1\x28\x62\x51\x80\xa1\x6d\x29\x6d\x6b\x88\xa1\xa6\x00\x4f\x42\x2c\xc6\x25\x90\xd7\x8a\x98\xda\xe0\x48\x0d\xbc\x4f\x19\xd8\x31\x61\x67\x01\xf3\xc5\x59\x22\x30\x1f\xed\x13\x12\xe0\xb3\x62\x4f\x19\xa0\x2d\xa1\x88\x1f\x27\x89\x3c\x30\x4e\xfe\xa1\xdf\xf6\x60\xc0\x0a\x1f\xfd\x58\x07\xe2\x6a\x6c\x80\x59\x81\xd3\xfe\x4d\x89\x07\x57\x58\x2a\xe3\x52\x64\x26\x9b\xf9\x16\x4a\x1f\xc3\x0e\x23\xc5\xa4\x45\x1a\xb9\x56\x9a\x1e\x34\xc7\xe8\xd3\x3e\x44\x42\xfc\x2d\x11\x52\x19\x7f\xb5\x81\xb7\x2c\x22\x17\x01\xdc\x00\x2b\x1f\x95\x06\x35\xdb\x19\xfe\xa5\x72\x08\x84\x06\xea\x0b\x2c\xd4\x22\x51\x9b\x82\x1c\x9e\xe5\x46\xbd\x4b\xf8\x7b\x11\xf6\xb8\x02\x1c\xe0\x2d\xe3\x10\xa5\x3c\x91\xea\x32\x46\x0b\xa3\x1d\x82\xc0\x58\x33\x8b\xb8\x38\x3b\xf3\xd5\xe2\xb0\x1c\xae\xb8\xfb\xcc\xf0\xc2\x08\x15\xc9\x9a\x72\x11\xa1\xfb\x51\x0e\xa8\xb2\x5e\xf0\x25\xde\xa1\x24\x94\x6e\x02\x6e\x19\x0b\x31\xaa\x12\xa3\x42\xc1\xdb\x0c\x48\x07\xd1\xd6\x3c\xc1\x0e\x8a\x29\xb6\x08\x0c\x80\x54\xfe\x0e\x84\xe5\xee\x1a\xa5\x4a\x23\x84\x98\x85\xc4\x3f\x8e\x15\xf0\xb7\x28\x54\x5b\x38\xf3\x59\xa5\x99\x96\x20\x5b\xac\x76\x2d\x4a\x2e\x05\x19\x54\x3f\x24\x98\xca\xfa\xaa\xe9\x92\xbe\x53\xfd\x5d\x13\xd3\x4f\x32\x09\x49\x02\x4c\xd5\xbc\x63\x9e\x2d\x80\xc9\xcd\xcc\x76\x3b\xae\xa0\x61\x77\x9f\x4f\xc5\xc6\x7e\xde\x8d\xd4\xbd\xdd\xe5\xf6\x45\x4e\x6b\xf6\x35\x89\x70\x0f\xcc\x0c\xf7\x5e\x40\x80\xac\x82\x6e\xc1\x3a\x83\x5b\x68\xa3\x6c\x82\x05\x0d\x8f\x25\x73\xa1\x36\x96\x45\x22\xe3\x44\x02\xa3\xe1\x51\xeb\x1e\x83\xa3\x1a\x93\xea\x72\x5c\xea\x33\x17\xac\x23\x12\x45\x89\x54\x0c\x50\x81\xae\xbf\x66\xfc\x09\x84\x37\x1f\x3e\x0f\x7f\x2f\x42\x24\x54\x76\x81\xd9\xf8\x1a\x57\x80\xe2\x5f\x35\x3b\x6a\xa1\x04\x29\x82\x27\x0f\xcc\x18\x4a\x5f\x7f\xea\x2e\x33\xb8\xcf\x9f\x3a\x8d\xe3\x93\xa7\xae\x08\xfb\xe4\xe9\xbb\xcc\x3f\x6e\x42\xf7\x56\xe9\xe0\x98\xb3\x7b\x12\xe0\xa0\xf8\x2a\x9d\x28\xab\x12\x73\xc4\xb1\x44\xfb\xd3\x51\xf1\x24\xda\x3f\x8f\x96
\x13\x10\x47\x21\x71\x34\xda\x63\x8a\xb9\x66\x9b\x1d\xa1\x7b\xcc\x63\x4e\xa8\xb4\x2b\x9c\x88\x74\xf5\x97\x04\xab\x1d\x4b\xea\xc7\x18\xc3\xb5\xb1\x0c\xf3\x0d\xa5\xc4\xbe\x34\x56\x9a\xd5\x61\xe0\x33\xba\x0b\x89\x2f\x21\x48\xd4\xd0\x20\x89\x15\xe3\x88\x93\x67\x10\x3f\xc6\x84\xff\x0a\x3c\xea\x65\x70\x9f\x47\x57\xa5\x9a\x91\x5d\x49\x41\x46\xa2\xa1\x26\x98\xc2\x00\xd0\x4e\x62\x0e\x0f\x07\xe2\x1f\x80\x94\x55\x60\xaa\x77\x62\xcc\x23\x14\x61\x2a\xc3\x63\x0a\xe9\x64\x3a\xd9\x69\x75\x32\x3a\xa1\x12\xef\x31\x77\x10\x8a\x50\xf9\xfd\x77\xcd\x44\x7a\x97\x01\x7d\x2e\xf3\xd1\x24\xda\x6a\x8f\x9a\xda\x57\x30\xca\x24\xa3\xc4\x47\xa1\xf6\xf7\x28\xf1\x25\xb0\x00\x7c\x8f\xf9\x11\x2a\xd3\x06\x9a\x92\xca\xd6\xb5\xfc\xa5\x0c\x03\x2d\x17\x84\x76\x60\x0a\x89\xe4\xe9\x72\x81\xd0\xbd\x12\xa7\xa7\x2f\xc4\x99\xf9\xb0\xd3\x06\xb6\xed\x3c\x9a\x44\x4d\x14\xba\x31\x62\xc3\x0c\xc7\x22\xa4\x36\x5b\x45\x83\x98\x88\x4c\x82\xc0\x82\x02\xd3\x44\x1d\x96\xc8\xc3\xb1\x4c\x38\x35\x50\xfc\x84\x73\xc3\x46\x6c\x6b\xf7\x4f\x55\xc0\x43\xb5\xcf\x99\xcd\xdf\x2d\xbd\xd5\x6a\xb3\x5e\x4e\xde\xbe\x9d\x4d\x37\xb7\xf3\xd5\x8d\x37\x9d\xbd\x9d\x79\x97\x25\xd8\x64\x07\x94\x01\xc7\xf7\xc4\x58\x04\x02\x90\xaf\xb7\x45\x85\x56\x98\x26\x51\x91\x8e\xa3\x9e\xd0\xeb\xed\x26\x57\x57\xad\xef\x67\xf3\xb5\xb7\x9c\x4f\xae\x36\x8b\xf9\xd5\xc7\x7e\x2d\xaf\x16\x93\xcb\xcd\x8f\x93\xab\xc9\x7c\xea\x2d\xb3\x4f\x42\xb4\xc5\xe1\x6f\xb9\x1b\xbf\xd2\x1d\x9e\xbc\x86\xae\x51\xac\x64\xaf\x81\xae\x76\xe8\xda\x55\x6d\xb6\xe9\xc2\xac\x25\x1f\xd1\xa2\x24\x66\x7c\x8f\x28\xf9\x47\x79\x01\xa9\x6f\x94\xa1\xb0\x57\x16\x2f\xce\xb7\xd0\x65\xe5\x65\x88\xa2\x77\xe9\xe2\x80\xd4\xd2\xd2\x01\x0a\x13\x3c\x19\x94\xf7\x05\x5b\x12\x86\x0a\x25\xa3\x52\x86\x20\x98\x62\xbf\x63\x15\x9b\x1d\x09\x25\xe6\x9a\xe3\xf4\x06\x03\x02\xf6\x40\xb3\x8f\xfd\x03\xe2\x7b\x5c\x01\x7c\x04\x89\x51\x34\x84\xcc\xd9\x35\x04\x4c\xef\x09\x67\x34\xd2\xff\xd0\xcb\x7d\x08\x58\xfa\x63\xe7\xae\x68\x08\x8a\x57\xcb\x62\xb6\x71\x57\x94\x4a\xeb\x51\x84\x28\xda\x63\x6e\x76\x44\xc6\x58\xa4\x7b\xf3\xab\xfa\x23
\xac\x4e\x1f\xa8\x21\x35\x83\x4d\xec\xde\x4a\xe9\x40\xb2\xd7\xfa\xef\xcc\x92\xb7\xec\xb6\x28\xfb\x3c\x4a\x2e\x0c\xdb\x5e\xcf\x81\xcb\x5f\xa1\xdd\x15\x2e\x6f\x45\x09\x66\xd9\x71\xc1\x04\x2e\x4e\xb3\x76\xb0\x29\x01\x6d\xfc\x10\x5a\xc6\xfa\x07\x44\xf7\x58\x18\xd5\x44\x99\xd4\xea\x89\x67\x91\xb2\x22\x87\xf7\x10\xb3\x21\x12\xf2\xda\x48\xed\x27\x98\xcf\x57\x85\xaf\x7f\x05\x1b\x5a\x21\x57\x31\xa4\xad\x86\x39\xdd\x92\x56\xf3\x26\xa4\xd9\xb2\x04\x4b\x2b\x2f\x9f\x32\x62\x07\x98\xe7\x0d\x7d\x8e\x22\x5c\x1a\xb1\xd9\xaf\x05\x99\x54\x1f\xeb\xf0\x99\xcf\x22\xb5\xc0\xca\x4c\x4e\x28\xdc\x71\xec\x33\xea\x13\xb5\x62\xef\xb4\x52\xca\x65\x62\xd5\x1b\x91\x35\xb5\x9b\x6f\xce\x7c\x2c\x6a\x10\xb3\x15\xf0\x44\x22\x2f\x31\x0a\x8e\xcf\x25\x71\x09\xc8\xd7\x24\xb0\x02\x9e\x2b\x4c\x2d\xa2\x89\x52\xbe\x7a\x9d\x82\xe4\x68\xb7\x23\x7e\xd9\xc1\x53\x24\xff\xf3\x48\x5e\x71\xa1\x6a\xf2\x3f\x97\xe4\x09\xf5\x0f\x2b\x89\xf6\x7d\xcc\xef\x1a\xa9\xb3\x8f\x3b\xed\xa5\x42\xdb\x36\x9b\x69\xb0\xd6\x54\x56\x6d\x95\x2a\xd8\x63\x40\x02\x02\xbc\x23\xd4\xc4\xb2\xff\xcb\xc6\xfa\xcd\x88\x6f\x42\x24\x15\xb9\x2c\x26\x65\xaa\xab\xaf\xc5\x7f\x7f\x63\xfd\xab\x35\x21\x2e\x31\x8f\xc4\x99\xe9\x6a\xa4\xbb\x12\xdf\x16\xbd\xce\x22\x89\x63\xc6\x2b\x0b\xe6\x6e\x72\x75\xf3\x7e\x72\x37\x84\xbb\x1f\xbd\xb5\xfa\xbf\x92\xab\x77\xef\x26\x77\xda\x05\x45\x99\xf5\xb0\x2b\x8e\x30\xa1\x69\x1c\x0c\xe1\xdd\x44\x1b\x56\x42\x24\x11\x0e\xc6\x70\xc3\x84\x20\xdb\xb0\xac\xc2\x8d\xca\xbf\x80\xab\xc9\xed\x7c\xfa\x7e\xb3\x5a\x4f\xde\x79\x45\xbb\x6a\x08\xb7\xf3\xd9\xf5\xcd\x95\x77\xed\xcd\xd7\xea\x9f\x37\x4b\xcf\xb4\x1d\x82\x37\x59\x5e\x7d\xdc\x4c\xa6\x53\x6f\xb5\x2a\x9b\x8e\x1a\xdb\x21\x28\x5c\x15\x1a\x43\xb8\xf4\x6e\x96\xde\x74\xb2\xf6\x2e\x07\xad\x16\x5e\x13\x1a\xa5\x46\x25\x8c\x4a\x6f\x32\xe4\x4a\xbf\x16\xf1\x2c\xbd\xd0\x68\x96\x7e\x51\x18\x97\x7e\x78\x57\xfe\x67\x3e\x8e\x9c\x93\x59\xdd\x13\xdc\x93\x8d\x59\xcd\xe5\x5b\x8b\x3c\xa6\xd0\x33\x4f\x59\x21\xe6\x9f\x83\xed\xb1\xe0\x74\x97\x27\xa3\x38\x47\x51\xa3\xb0\x1a\xd4\x5c\x2a\xbb\x44\xed\xbb\x3e\x27\x28
\xd4\x1c\xa8\xfb\x34\x12\x2c\xdf\x6e\x58\x0b\x4c\x14\x9c\xa1\xea\xd9\x71\x16\x59\xcf\x9c\x6d\xb8\xc4\x9f\x13\x2c\xe4\xd8\x84\x02\x35\xc3\x3b\xdf\xdb\x40\xe2\x86\x04\x65\xe9\xf7\xd6\xee\x44\xf3\xa0\x64\x1a\x93\x2c\x86\x24\xb3\x88\x64\x21\x20\x99\x43\xfc\x92\x73\xab\xfd\xf8\x74\x12\xde\x94\x22\x7b\xce\x49\xb6\xb0\x9f\x3d\xc7\x79\x80\x73\x87\x39\xa6\x7e\x79\x4f\x31\xca\xe0\x5e\x18\x71\x93\xfe\x33\x35\x52\xeb\x98\xda\x18\xd4\x45\x31\x78\x69\xc9\xa1\x67\xa5\xd2\x7f\x41\xbf\xd4\xe9\x54\x77\xe9\x57\x08\xb5\xcc\xbf\x3e\x59\x71\x96\x79\x71\x99\xee\x5c\xb9\x92\x89\x25\x7f\x99\x92\x88\xf9\x66\x76\x8b\xab\x61\x1a\xe4\xeb\xd4\xa3\x98\x51\xbd\x65\x38\x60\xbb\x15\x51\xbb\x8e\xad\xde\x2d\x11\x09\x84\x4a\xe6\xf0\x15\xc0\xcf\x07\x4c\x01\x01\xc5\x0f\x15\x3d\x9c\x77\x6d\x6c\x24\xbd\x73\x41\x14\xf0\x23\x11\x52\x47\x1f\xa9\x7e\x6d\x7c\x59\xc1\xb0\x1a\x84\x44\xe2\x48\xfd\x03\x67\x94\x25\x22\x2c\xc7\x26\x62\xcc\xb5\x26\x42\xca\xac\xc6\x4a\x31\x23\x7e\x04\x21\x71\x2c\x72\x9c\x8b\x04\xa8\xa2\x6e\x2c\x88\x12\x4c\x3b\x9c\xb5\x5a\xb6\x99\xb6\x17\xe0\xa3\x30\xd4\xf6\x5d\xd1\x34\x50\xa3\x26\x21\xae\xda\x68\xa4\x66\x9e\x59\x40\x43\xb8\x4b\x5d\x08\x9b\xdc\xb5\xa4\x34\x9b\xb1\x71\x36\x6a\xc2\x8f\x1b\x6d\xe9\x98\xdf\xad\x6d\xb3\x51\x58\x25\x02\x8b\xbb\x61\x6d\x13\x7a\x97\x70\x72\x67\x48\x75\x40\xf7\x58\x99\x43\x54\x10\x25\x36\x8a\x5b\xda\x48\x67\xee\x45\x44\x44\x48\xfa\x07\xeb\x1d\x91\x98\x06\x15\x59\xa4\x87\x7f\x01\x0b\xea\x3b\x86\x05\xec\x1e\x73\xf8\x46\xf5\x6a\xd3\xf0\x70\x18\xa8\xdf\x77\x28\x14\xf8\x5b\xed\x9f\xe3\x58\x6d\x83\xca\xdb\xf9\x07\x06\xb1\x55\xc0\xc0\x12\xe9\xb3\x48\xa9\xdd\x0a\x78\x91\xf8\x3e\xc6\x6a\xd3\x6c\xe0\xe3\xcc\xc0\xd3\x38\x95\x20\xea\x41\x58\x27\x4f\x26\x5b\x8d\x00\xe1\x18\x1e\x90\xd0\x0c\xc6\x39\xe3\x36\x1a\x5c\xee\x6a\x87\x48\x88\x2b\x22\x53\x4f\xb8\xee\x29\xdd\x70\xef\x58\x42\x03\x6d\x3b\x2a\xb3\x85\x50\x14\x6e\x7c\x46\x8d\xd1\x38\xd6\x2d\x8d\x05\xd2\x62\x2e\x66\x63\x32\xae\x4b\x13\xce\x56\x43\xd2\x94\xb3\x1b\x42\x3d\x98\x8b\x6c\xae\xef\xcc\xac\x76\xcc\xbc\x93\x91\xcc\x97
\x5d\x9c\x95\xb7\xb4\xef\xec\xc2\xcc\xdf\x96\x29\x53\x1b\xa2\xa5\x9f\x93\x3d\x9b\x38\x1c\xd1\x32\x9f\x35\xe1\x55\xe0\x63\xc5\x01\x7a\x3e\x8a\x1b\xad\x94\x27\xd2\x2f\x86\x55\x07\x02\x8e\x62\x79\xd4\xda\x84\xe2\x87\xf0\x98\xed\xcc\x2c\x9b\x88\x31\x4c\x1a\x0d\x7f\x3d\x47\x88\x84\x49\x85\x81\x7b\x30\x84\xa5\x68\xf6\x6f\x71\x37\x1e\x9c\xaa\xc2\xd2\xc4\x98\xd3\x13\x0a\xd6\xe5\x94\x1a\x68\xdc\x11\x38\x1a\x96\x74\xc9\xd2\xa6\xee\x18\x9b\x26\x45\x28\xf3\x3d\x19\x62\x66\xc4\x77\x38\x54\x0b\x80\x9b\xb2\x09\x1a\x72\x77\x5a\x46\x0b\xbd\xbc\x86\x70\x52\x0a\x82\x3b\x97\xa7\x46\x90\x9f\x96\xd7\x23\x21\x8f\x61\x29\xfd\xa5\x66\xac\x94\x17\x4c\x96\xb9\x35\x65\xd4\xe8\x5b\xff\xe8\x1e\x69\x3d\x86\x00\x2d\x71\x04\xc7\x28\xa6\x8e\xae\xda\x86\xb3\xc2\xd2\x48\xcd\x08\x3d\x92\x28\x89\xd2\x10\x02\xdb\xe9\xac\x2d\x2c\xa4\xd5\x17\x18\xa9\xcd\xa0\x5d\x6c\x69\x92\x64\x8d\xdc\x6a\x61\x70\xec\xe3\x8a\xf7\xba\x40\x82\x86\x29\x46\x9c\xa3\x2a\xa2\x4d\x43\x6b\x9d\x9f\xf7\x4c\xc9\x51\x2d\x2b\x08\x55\x9b\xd4\xac\x67\x33\x0e\xb3\x8d\x35\x2d\x12\x4a\xa4\x1a\x29\x7e\xc4\x7e\xe2\xc8\x37\x81\x9c\x9d\x53\x2f\x46\x35\x6b\xc4\xa6\x1a\x63\x1a\x8c\xb4\xa0\xa9\x59\x66\x79\xa3\x90\x08\x69\x17\x9f\xfa\xb3\xd2\x86\x48\x1c\x35\xf2\xaf\x73\x01\x74\x2d\xe9\x46\x7a\xd5\x13\xf2\xf2\x67\x04\x24\x2a\x7b\x12\xcc\xd3\xb4\x76\xcd\x83\xf8\xde\xf9\x7b\xdb\xec\x56\x86\x60\x17\x21\xdf\xd7\xb1\x85\x9a\x81\x3b\xe1\xfb\xc4\x78\x72\xac\x01\x87\xa9\xe4\xc7\x98\x11\x2a\x6d\x3c\x9b\xf9\x9f\x30\x37\x63\x19\x0c\xdc\x20\x01\xa6\xd7\x97\xda\xd0\x54\xc2\x8c\xd8\x9d\x19\x11\xda\x21\x9b\x3a\xec\xc7\xf0\x01\x71\xa2\xf3\xe8\xf2\x8d\x04\xfc\xe1\x9b\x0f\x93\xe5\x66\x3e\xb9\xf6\xbe\x6d\x00\x8d\x38\x06\xfc\x18\x23\x65\x4f\x65\x29\x48\x05\x66\x1c\x0c\x44\xd1\xe5\xae\xed\x06\x04\xf7\xb6\xaf\x06\xa0\xbe\x16\x39\x4a\xf3\x28\x39\x13\xde\xa7\x66\x44\x86\x99\xd2\x45\xc6\x92\x53\xf6\xbf\x8d\x68\xd8\xf8\x67\x03\xcc\x84\x1a\x67\xb4\x95\xf0\x85\x91\x81\x38\x52\x89\x1e\x53\x65\x87\x85\x8f\xe2\x34\x5e\x81\x20\x60\x49\x33\xa2\x7f\xf8\xc3\x10\x08\xbe\x80\x3f\x14\xc0
\x8d\xc1\xb3\x10\x0a\x84\x34\x0e\x70\xac\xac\xc8\x6d\x4e\xb0\x61\x03\x58\x8e\xf7\x88\x07\xa1\xf5\x2e\x3f\x1c\xb0\xae\x56\x50\x03\x4e\x09\x67\x36\x11\xba\x72\x81\x32\x39\x2e\xa4\xf8\x35\x80\x4c\xc3\x0b\x8e\xcc\x3f\x89\xc4\x27\x71\x46\xa8\x5a\x7a\xa3\x00\x49\x34\xb2\x35\x1c\x26\xad\x4b\xcb\x91\x91\xcf\xa2\x08\xd1\x60\x84\x2c\x47\xe6\x59\xba\x67\xaf\x78\x42\x29\xa1\xfb\x11\xca\x5a\x11\x3a\x42\x23\x71\xc0\x61\x38\x68\x59\x0d\x1d\xd2\xa4\xd8\xb4\x5d\xa6\x98\xa7\x41\xb2\x98\xa7\x45\x3f\x96\x7b\xca\xa4\x4c\x63\x5b\x3b\xc8\xaf\x22\x07\xa6\x06\x56\x0f\x51\xe0\x65\x2b\xdf\x40\x1f\xc3\x9c\x49\x2b\xd1\x2d\xb7\xea\x12\x1f\x4d\x75\xcd\xe3\x0d\xc3\xac\x88\x0c\xf0\xe6\xeb\xe5\xc7\x9b\xc5\x6c\xbe\xee\x94\x11\x0d\x10\x3b\x24\x47\x1f\x19\xd1\x00\xb9\x59\x72\x74\xcb\x88\x06\x90\x2d\x92\xa3\x5b\x46\x34\xcb\xab\x46\xc9\xd1\x5b\x46\x34\xc0\x6e\x90\x1c\xfd\x65\x44\x03\xdc\xba\xe4\xf8\x1f\x19\xf1\xf5\x64\x04\xa6\xf7\x5f\x45\x3e\x78\xf4\xbe\x5b\x36\x5c\x11\x61\xcc\xbb\x7c\xb5\x64\x4c\xa0\x2d\x07\xa1\x4b\xb8\xca\x8b\xce\xbd\x98\x7f\x2f\xa2\x37\x18\x7e\x15\x9a\x74\x98\x7f\x4d\xc4\x6a\xb3\x03\xcd\x33\xaa\xfb\x3a\xf3\xa7\xdd\x24\x34\x4f\xd5\xd1\xee\x1e\x65\x2b\x6b\x41\xa7\x23\xbe\xfa\x34\x6c\x60\x8b\x21\x44\x17\x4b\x34\x89\x71\xf3\x5c\x27\x42\x4b\x55\x04\xd3\xcd\xec\xd2\x9b\xaf\x67\x6f\x67\xde\xd2\x78\x93\x22\x6a\xeb\x51\xf1\xa3\x8f\x71\x00\x7f\x7a\xf3\xc3\xf7\x7f\xd1\x99\x17\xc8\x97\x98\x8b\x66\xc8\xda\x23\xf7\xf5\x09\xf4\x41\x81\xed\x4b\xa1\xc1\x29\x7a\xaa\x95\x46\xb9\x0e\x8b\x39\xbe\x27\x2c\xc9\xa3\x86\xee\x35\x68\x16\x5f\x2b\xcc\x7c\xfb\xa6\xcb\x5e\xe8\x11\x38\x4b\xa4\x7b\x06\x45\x3f\x33\xda\x02\x7e\x8a\x31\xdd\x0a\xb1\xb7\xba\xac\x28\xc6\x0e\xa0\x5f\x51\x69\x16\xf0\x7c\x92\xea\x6c\x05\x5b\x31\xbd\x6d\x35\x82\x16\xb4\x2f\x5f\x0e\x33\x27\x6e\xea\x68\x08\x31\xdd\xcb\x43\x2b\x44\x22\xec\x52\xda\x1e\x95\xd2\x75\xeb\x43\xf3\x98\x05\x90\xa6\xc3\xb6\x48\x24\x25\xd4\xf4\xaa\x5b\x55\xa3\x4a\xe5\xa7\xd0\xa6\x7b\x7d\xb6\x8a\x69\x68\x58\x9f\x1d\x08\x9c\x24\xdf\xfb\x01\x2c\x7b\x7e\x74\xeb\xcc\x73\xe5\x5a
\x4f\x03\x61\xc8\xd0\x26\x19\x4f\x27\x7c\x4b\x8b\x3e\x2a\x45\x3d\x02\xfb\x1c\xcb\x9f\xf0\x71\x89\x77\xed\x2d\x7b\xcf\x10\xd4\x67\x69\x55\xe8\xa5\xef\x97\x27\x4d\xd5\x09\x1d\x54\x9c\x76\x21\xf6\xa5\x50\xbb\x1a\x0d\xc1\x88\xc6\xbc\x10\xdf\x95\x5d\xee\x7a\x4c\x9c\x59\x47\xd7\x0c\x2a\x70\x6d\xa2\x9f\xed\xaa\xb0\xdb\x6e\x48\x9f\x91\x45\xb0\xa3\x59\xdf\x69\x87\x6c\xea\xbb\xdb\xf5\x56\x9d\xe9\xe3\x9c\xfc\x1e\xdf\x95\x75\x69\xd9\x5f\x4e\x0b\x26\x87\x9d\x2a\xd2\x3d\x2d\xea\x71\xce\x4a\x16\xc6\xff\xc5\x00\xdb\xe8\x62\x61\x20\x5d\xcc\x63\xe9\x51\x40\x22\x55\xbd\x20\x14\x86\x36\xe4\x3e\x76\xa5\x07\x98\x2f\x74\x22\x40\xde\x65\xaf\xfe\xc8\xae\xde\x25\x82\x80\xec\xb4\x92\xca\xaa\xcc\x5a\xc5\x7a\xfa\xb4\x85\xf2\x9b\x9e\x62\x88\xdf\x10\x32\x8d\xed\xf7\x9e\xdb\xe6\x90\xbf\xeb\xb9\xaf\x97\x8e\x35\x3d\xcf\xe3\xcd\x7a\x91\x59\xd3\x53\xcb\xae\x70\xf1\x55\x3a\x45\x16\xff\xae\xc5\x6f\x9e\xa9\x31\x61\x06\x26\x6c\x37\xc8\xf4\x88\x4d\xce\x33\x29\x58\x26\xb0\xef\x0e\x6a\xb8\x1f\x9d\x88\x57\x93\x64\x7d\x50\xfa\x35\x58\xa4\x3f\xa1\xbb\x38\x45\xbb\x95\xda\xb6\xbe\x2d\xbc\x50\x2d\x8e\x70\x3a\xe8\xa1\x45\x14\xdd\x2e\xaf\x52\x29\x94\xa9\x22\x83\x91\x5a\x93\x69\x32\x5f\xfa\xa6\x61\xb4\x4b\xbc\x27\x42\xf2\xa3\x9a\xd4\x4b\xe3\x2b\xeb\xe7\x23\xf1\x19\xf5\x71\x2c\xf5\x1f\x56\x0d\x9e\xe9\xce\x85\x6b\xe5\x37\xef\x18\x4f\xa5\x53\xe3\x2e\xb1\x44\xa6\xe2\xae\x30\xdf\x6b\x64\x89\x83\x80\x94\x9a\xbd\x9c\xaf\x36\x57\x93\x1f\xbd\x2b\x17\x23\xea\x04\xc5\xaf\xe2\xd5\xb8\xa9\xa5\x3a\x3a\x51\x4e\xfd\x1a\xba\x63\x65\x5e\xe3\xc7\x98\x09\x6c\xd4\x79\xd9\x99\x01\x0b\x1a\xba\xbb\x06\xb5\xcc\x4c\x84\x4c\xc1\x49\xb7\x24\xd9\xc0\x8d\x02\xcb\xe9\x60\x3a\x8b\xcc\x2e\xb8\x01\x62\x48\x84\xc4\xd4\xa4\xf0\xe8\x1c\x1c\xb5\xf2\xf9\x0e\xa9\x0d\xe5\x37\xe7\x63\xfd\xdf\xb7\xa9\x4b\xb6\x4c\x70\xc9\x1a\x60\xaa\x2d\xb7\xef\x63\x93\x2c\xa2\xf7\x77\x2c\x22\x52\xe7\x05\x21\x83\xb9\x8d\x58\xa6\xbb\x2f\xff\xc0\x04\xa6\xb5\xfc\x82\xc2\x7c\x21\x61\xa3\xd7\x15\x14\x0e\x9c\x25\x7b\x93\x0a\x73\xb3\x58\xae\x9d\x26\x71\x03\xcc\x54\xfa\x15\x07\x64\xc9
\x01\x4d\xe2\xeb\x5f\xda\xa7\xd4\xcc\xaa\xfd\xcc\xb9\x8c\x50\x0a\x50\xf7\x06\xab\x5b\x85\xb4\xc5\xc8\x9d\x03\xac\x04\x95\x15\x1e\x7d\xb7\x4e\x37\x05\xb6\x2b\xcf\xba\x99\x72\xa1\xe6\xbc\x7e\x84\x45\xf5\x89\x32\x97\xd2\x3d\x0a\x49\x00\xeb\xe9\x4d\x91\xa3\x87\x70\x0e\xff\x91\xc3\xde\xe8\x57\xff\xd1\x0a\xf1\xfb\x3f\xff\xf9\x4f\xdf\x37\x2b\xcb\xdf\xdf\x1b\x37\xdb\x15\x73\xb2\xd3\x2c\x12\xf3\xd3\xd1\x96\x6f\xc6\x9c\x49\xe6\xb3\xb0\x75\xa4\xe6\xd4\x02\x7d\xd2\x1c\x7b\xc0\x59\xb5\x16\xe2\x18\x5e\x2a\x5d\xf4\xfa\xa5\xde\x14\xbd\x3c\xbc\xf1\x5f\xba\x08\x92\xea\xfd\x56\xc1\xdd\xb2\x52\x6a\xe9\x9a\x16\x5c\x7b\xe3\x8e\x45\xd5\x0e\xa5\xac\xdd\xa7\x2c\x8a\x13\x89\xb3\x9e\xd3\x3d\x99\x09\x8b\xeb\x74\x4d\x22\xba\xdc\xda\xb5\x33\x50\xba\x54\xb8\x90\x8c\xa3\x3d\x3e\x8b\x95\x61\xa4\x58\x5d\x8e\xee\x59\x98\x44\x58\xbc\xca\x28\xea\xb6\xe7\xbb\x85\x82\x1f\x27\xb3\x20\x6c\x61\xd0\xb6\xd3\x30\x1c\xc4\xb6\x0b\xdc\x40\x6d\x6c\x5e\xa2\xea\x25\x36\x89\x5c\x58\x64\xee\xaf\xe9\xcd\x6d\x7e\x7a\x8e\xd6\x11\x52\x86\x38\x00\xd6\x26\x8c\x28\x93\xc0\x12\x29\x48\x80\x8b\x99\x34\x4d\x93\x60\xce\xa9\xe9\x1a\x78\x87\x0b\xa3\x5f\x3e\x54\x15\x68\xc7\x62\xaf\x26\xe9\x6b\x44\xfb\xd1\x72\xa0\x4c\x0f\x88\x70\xc4\xf8\xd1\x24\xac\xdf\xdc\x9a\xba\x47\x53\xd3\xa1\x5d\xf2\x4c\xe2\x8b\x96\x98\xac\x7a\x98\x02\x93\x7d\x93\x2e\x74\xa5\x70\x53\x80\x83\xc1\xeb\xc1\x60\x08\x83\xc1\x1b\xf5\x3f\xd5\xd5\x60\xf0\xdd\x60\xd0\xb6\x69\xb0\x27\xe2\xc0\x77\x1a\x88\x5d\x3a\x02\x90\x84\x10\x23\x21\xe1\xcd\x3b\xa2\x26\xce\x60\x6f\x4c\x21\xd3\x71\x1b\xa2\x3b\xeb\xdf\x8c\x75\xe9\x89\x09\xa1\xea\x74\x64\xfb\x66\x30\xf8\x9c\x20\x2a\x89\x3c\x0e\x06\xf0\xe9\x2f\xc2\x4c\x41\x0b\xc4\x74\x45\xee\x89\x3c\x24\x5b\x5d\x30\x93\x2f\xce\xe2\x9f\xdb\x90\x6d\xcf\x22\x24\x24\xe6\x67\x42\xea\x72\xca\x33\xc1\xfd\xb3\x4f\x7f\xd1\x6b\x18\xc5\x24\x42\xfe\x41\xc9\x81\xe3\x59\xfc\x69\xaf\x7e\xc8\xca\x32\xcf\x52\xa4\xc6\x7b\xe6\x5a\xba\x66\x7d\x5f\xb3\x84\x7e\x25\x2b\xf7\x43\x01\x60\xb7\xa0\x33\xad\x95\xbc\x8f\xd4\x17\x79\xba\x79\x1e\x26\x37\xc7\x7f\x9a\xd4\xf4
\x7f\x43\x53\xab\x93\x5e\xcf\x8c\xe3\x8d\x0c\x65\x6f\x50\xa3\x37\xbe\x8f\x41\x97\xc1\xf8\xfa\x06\xc6\x75\x07\x7a\xd0\x1c\xf3\x53\x5f\xb9\x37\x1c\x48\x1a\x73\xa3\xdd\xca\x50\xeb\xde\x30\x60\xae\x03\xf4\x48\x15\x70\x1d\x12\x54\xa2\xde\x42\x85\xc1\xc5\x40\xfb\xe8\x5a\x61\x5a\x0f\xcc\xff\xbd\xb2\x90\xc5\x10\x48\xb6\xfd\x0a\xf1\x4e\x9a\x8c\x65\x9d\xbe\xae\x2d\x44\xa6\xf4\xd0\x03\x11\xed\x0e\xa8\x2d\x86\x3b\x53\x5f\x27\x3e\x87\x77\xe6\x08\xde\x34\x4d\x33\x0f\xc3\x59\x32\xd8\x45\x55\x2b\x8e\x76\x00\x45\xf7\x88\x84\xe6\x1c\x39\x51\xe8\xe1\xec\xbf\x52\xe0\xff\x7d\xe7\xae\x11\x6f\x05\xcc\xa8\x8b\x0e\xba\xa8\xbc\xb9\xe2\x5b\x75\xab\xcd\x91\xe8\xa8\xfe\xf4\x19\xa5\xd8\x97\x23\x9e\x34\xf7\xf5\xfb\x5b\xbc\x45\x87\x30\xb1\x1b\xe8\xbc\xe4\x22\xf5\x3e\x20\x3b\x25\x65\xf1\x95\xe5\xa5\x7a\xf9\x4e\xf4\xe9\x67\xac\x79\x0e\x68\x4d\x5f\x38\x45\x92\x0b\x40\xa5\x8c\xb4\x36\x7c\x5d\x4a\x2a\x10\x0d\xb6\xec\xb1\xb4\xa1\x96\x0c\x0e\x4c\xc8\x4a\x76\xad\xbb\x2e\x13\x0a\xb5\x99\xde\x7f\x7a\xd3\xdb\xf5\x6c\x31\xdf\x78\xf3\x0f\xb3\xe5\x62\x7e\xed\xcd\xd7\xe5\x22\x4d\x77\x93\x77\xde\xfc\x75\x3d\xb1\xb0\xb1\xed\x9b\xaa\x2e\xac\xd6\x67\x82\x29\xa7\xec\x42\xa7\xe7\x17\x0a\xbb\xfe\x4d\xdf\x94\x9a\xd6\x0f\xda\x80\xdf\x36\x75\xbe\x76\xf0\x06\x34\x67\xcd\xdb\x93\x11\xda\x13\xe6\xb9\xa3\xf0\xfc\x34\x94\x1c\x55\xe7\x35\xa4\xd6\x26\x05\xfc\x73\x62\x03\x35\x39\x4e\x29\x37\xce\x76\x95\xba\xa6\xd4\xf7\x54\x23\x16\x91\x99\x03\x0a\x25\x92\x29\x11\x68\x0e\xde\xc9\x0f\x82\xda\x22\x61\xce\xe9\x2e\x16\xa0\xa9\x8e\xcb\x63\x17\x3e\xaa\xd6\x0c\x76\x4c\x67\x35\x72\x65\x00\x9c\xb4\xb4\xdd\xdf\x94\x83\x8e\xa6\x89\xe3\xec\x9c\xa6\xdc\xf8\x36\xb3\x21\x42\x8f\x33\xab\x40\xa6\x4a\x9f\xba\x84\x74\x97\xeb\xa7\xcb\xe5\x53\x35\x23\x2a\x5d\x3a\xbe\xa8\x9c\x0b\x53\xad\x88\xa8\x96\x3f\xd8\xd2\x08\x4d\x04\x47\x99\x6a\xfe\x58\xe3\xe1\x80\xaa\x65\x11\x9a\x14\x84\xfe\xe6\xa4\xa8\x74\xd9\x49\x0a\x42\x7f\x75\x52\xd8\x42\xe3\x89\xef\xbb\x88\xd0\x7f\xe5\xaf\x4a\x70\xda\x38\xda\x79\x5e\xc9\x6c\x72\x9d\xa2\x02\xc8\xc0\x00\x24\x04\xf3\x09\x4a\xf3
\x89\xeb\x92\xb2\x20\x34\xf2\x70\xaf\x3d\x50\x6a\x9d\xff\x23\x03\xc8\xb1\x3d\xba\xd4\x1e\x4b\xa5\xcf\x80\x94\xf5\xdd\x53\x7a\x24\x9c\x49\x12\xcd\x6b\xdc\xf4\x26\x37\x28\x3a\x2b\x90\x2c\x9d\xb5\x5f\xc4\xa8\x06\xf5\x80\x84\x3d\x29\x21\xcf\x61\x1e\x96\x07\xa1\x65\x59\x7a\x2c\x68\x7e\x1c\x68\xe0\x3c\x61\x14\xaa\xe3\x73\x17\xc9\x34\x87\xe6\x8a\x61\xb8\x19\x8a\xce\x5a\xa7\x30\x8b\xb4\x61\x35\x7d\xa5\x97\xe9\x59\xd4\x4f\xe6\x9d\xb5\x01\xd0\xc6\x34\xd7\xe8\x11\x90\x75\x3a\xea\x93\xe7\x76\x69\x90\xd3\x9e\x1b\x2f\x99\x1a\x4d\xcc\xa8\x39\x6d\x3f\x75\xfe\x94\x69\x62\x4d\xde\x27\xd7\x42\x19\x6b\xb1\x55\xe5\x4e\xcc\xb9\xec\x6c\x97\x36\xd6\x7b\x68\xf4\xa9\x68\xd2\x4b\x56\x28\xcc\xfa\x17\x2a\x6d\x72\x0f\xbf\xbd\xae\xc9\xb9\x03\x6e\xdf\xd9\xea\x1d\xc8\xea\x73\x98\xca\xca\xaf\xe3\x2b\x9e\x56\xa0\x3e\xc1\x65\x6c\xc7\xdf\x0b\x52\x89\x29\xd4\x46\xcd\xb1\xf1\xb2\x4c\x20\xca\xf7\x79\x64\xb2\xbd\xc9\xc7\xa1\xa4\x8e\x63\x5f\xfc\xe1\x2b\x6f\xe3\x76\x0d\xe7\x32\x1f\xd8\x83\xe5\x60\x05\xa0\x30\x2e\xed\x6d\x74\x9d\xc6\x53\xa5\x6f\x47\xda\x5a\x6b\xfa\x52\xb7\x53\x24\xa3\x5f\x97\x67\xb7\xd9\x75\x06\xae\xb3\x11\xdb\x67\xc5\xb5\x01\xcb\x49\x93\x89\x29\x4b\x35\x45\x4a\x7d\x7c\xd9\x10\x50\x9b\x73\xb3\x5a\xf1\x9c\xcd\x2e\xa3\x82\xa9\x89\x75\xcd\x72\x46\x80\x36\x57\xec\xd7\xe6\x16\x78\x16\xc7\xb4\x00\x55\xbc\x94\x67\x60\xa5\x09\x52\x17\xf9\xa9\x29\x17\xbf\xa4\x03\xfe\xd2\x9c\xd3\x74\x82\x2b\x12\x4e\x72\x47\x42\x97\x4b\x12\x9e\xe0\xef\x38\xa9\x7d\xbf\xdc\x9b\x52\xce\xcd\xe7\xf0\xac\x55\x78\xa5\x8f\x55\xf9\x29\x7d\x1b\x1c\x30\xbf\x75\xee\x48\xee\xdb\x31\x02\x79\x20\x1c\xfb\x39\xf3\xb4\xa5\x2d\x9e\xaa\x40\x5a\xd2\xd7\x7a\xa9\x8d\x96\xef\xcb\xa2\xc3\xa6\x89\x15\xac\xd5\x2c\xe3\xb4\x28\xfb\xd3\x23\x15\x1b\xd5\x04\x11\x56\xd5\xf4\x2d\xab\xaa\xc5\x1b\xd3\x20\xa3\xe9\xbd\xad\x30\xaa\x53\xa4\x57\x95\xba\xb3\x59\xbb\x43\xfd\x99\x7a\xc1\x5a\xd1\xd7\x2c\xe8\x0c\x76\x76\xe5\x3d\xf4\xcb\x7a\xa8\x9d\xb8\x9d\xf5\xdf\x53\x87\xcc\x0c\x1a\x39\x27\xa0\xf4\xf4\xed\x88\x05\x18\xb6\x44\x66\xf7\x08\xb4\x3a\x7e\xd3\xb3\x37\x74
\xbc\x06\xb6\xc7\x94\x14\xe3\x42\x79\x8d\xc9\x1c\xdc\x62\xf9\x80\x31\x85\xf3\xf3\xf3\xf3\x96\x5c\x1e\xf5\x9c\xff\xf0\xc3\x0f\xf0\x0d\xf3\x25\x0a\xbf\x1d\xa6\x10\x75\x21\x0a\x83\xf3\xef\xbf\xfb\x6e\x0c\x97\x84\x63\x5f\x32\x4e\xb0\x28\x04\x03\x5a\x40\xc6\x48\x1e\xb2\x9b\x54\xd0\x6e\x87\xf5\x41\x48\x69\x50\xdd\x7a\x40\x4c\xf8\x52\xc0\x1f\x41\x91\x87\x53\x14\x86\xc7\xa6\xca\x65\xd0\xf9\x56\x49\x84\xc4\x27\x45\xb5\xf3\x37\x6f\xde\xe4\xfe\xa2\x58\x5f\xb1\xa6\x77\x0b\xf4\x08\x94\xd1\xd1\x3f\x30\xb7\xa7\xd8\x8d\xe1\x8f\x2d\x20\xd7\xb6\x1a\x35\x4f\xb2\x74\x4c\x91\x0e\x4a\xa6\xd3\x34\x86\x15\x6b\x43\x52\x35\xd6\xa4\xcc\x00\x9a\xf9\xb0\x4b\x3d\x64\xec\x13\xe0\x47\xe4\xcb\xf0\x08\xc8\x1e\x87\x7c\x88\x58\xdb\xfc\xd0\x24\xc2\x9c\xf8\x90\x9e\x96\x91\x96\xb4\x84\x18\x05\x6a\x9a\xd4\x60\x15\x5a\x11\x56\x90\xa3\x38\xc4\xe2\x42\x31\x76\x1b\x1f\xa9\x2e\x41\xcf\x3b\xfa\x2b\x7f\x78\xfc\x76\xa8\x6b\xf9\xd4\x84\x17\x98\x01\x18\x87\x3f\xbf\x7e\x0d\xdf\x6c\x91\xc0\xa3\xd7\xe7\xdf\xb6\x99\x21\xca\x32\x36\x60\xbf\xff\xee\x1c\xbe\x49\xfe\xca\x1f\x86\xfb\xbf\xf2\x02\x64\xfd\x7b\x0e\xf9\xbb\xd7\xdf\x9f\x0a\xf9\x87\x3f\xff\xd9\x40\x7e\x54\xa0\x1f\x87\xec\xaf\xbc\x84\xba\x7a\x9d\x75\xd0\x02\xf1\xbb\xff\xfd\xa7\x42\xd7\xf0\x47\x1b\x5a\xd0\x27\x3a\x6d\xb1\xb9\x00\xd1\x9e\x0b\xaf\x69\xad\x23\x48\xc0\x62\xd7\xe1\x26\xc5\x47\x0b\x75\xc3\xee\xe6\xe8\x1b\x12\x1a\xd6\x19\x42\x48\x3e\x61\xd8\x89\x77\x9c\x25\x71\x5e\xcb\xc3\xb1\x70\xb9\x03\x0a\x13\x65\xcc\x46\x83\x40\x2e\x2b\x04\x96\xe3\x0e\xa4\x5b\x80\x96\x86\x73\x12\xd2\x6d\xab\x53\xe8\xbd\xba\x1a\xcf\xb0\x86\xaf\x9f\xee\x71\x14\xe2\x4d\xb6\x5d\x87\xf1\xf5\x14\x53\x5f\x41\xec\x27\xa2\x67\x3b\x48\x68\x21\xb5\xaa\x10\xb9\xd4\x72\xc6\xa6\x88\x22\x4d\x9e\x16\x32\x3c\xe8\xe3\x8b\xb5\x3b\x9c\x88\x42\xfe\xfe\x10\x38\x0e\xf5\xf1\xc7\x8a\x55\x0b\x21\xe9\xb1\xde\xf6\x6d\x94\xc4\x6c\x5b\x04\xe5\xc4\x2f\x05\xf7\x13\x3e\xe6\x15\x73\xc2\x64\xda\x6a\xac\xed\xc5\x28\x92\xc1\x0e\xcb\xd6\x00\x6d\x63\xfd\x4a\xc6\xa0\xb1\x89\xff\x9a\x5e\xa4\xad\xc8\x68\x63\xad\x5d\xce\x43\x86
\x64\x59\xcc\x34\xb5\x60\xf4\xb9\x76\x7a\xae\xb5\x86\xb0\x91\xd5\x0e\x99\x7a\x34\x81\xbf\x34\xd1\x0d\x59\x0d\xa3\xcb\x76\x3a\x73\xdc\xff\x19\xb6\x0c\xfd\x2b\xdc\xba\x2d\xcf\x76\xb6\xee\x57\x5d\x34\xd2\x14\x6c\x53\xe1\x3d\x6b\x8a\xa2\x56\x2b\xcc\x3c\x7d\x73\x50\xe1\x84\x3c\x54\x70\xe5\x39\xb4\x18\x64\xe6\x71\x9b\x65\x46\x61\x3b\x4c\x31\x6d\xaf\x74\x80\x04\xcd\xed\xc3\x72\x36\x6a\xd1\x02\x7b\xad\xf9\xb4\xa8\x58\xbb\xcb\x31\x66\x3b\x38\xb7\x95\xa0\x4a\x66\x0e\x0b\x59\x07\x83\xdc\x8f\x6c\x50\x6e\x3f\x9f\x26\x7f\x94\x90\x70\xda\x5c\x4d\x96\x55\x27\xc4\x2e\xcb\xcb\x61\x5f\x75\xc2\xec\x63\x7f\xd5\xad\xac\x4e\xb0\xfd\xac\xb0\x9a\xad\xd5\x09\xb7\xa7\x2d\x96\x5b\x5c\x9d\x10\x4f\xb3\xc8\x3a\xd3\x65\x20\x33\xf7\xbe\xb6\x5d\x66\x9e\xa7\x58\x67\x9d\x40\x15\x1a\xa7\xd9\x68\xdd\x20\x9f\x64\xf4\x74\x2f\x81\x92\x25\xd7\x66\xaf\xb5\x97\xe9\xc5\x1d\x99\x5f\x70\x5a\xd1\x5b\xb5\x30\xa6\x5d\xd6\x43\xfb\xa9\x85\x99\xe5\xa2\x95\x6e\xa9\x1c\xb3\x93\x3e\xfd\x8e\x07\x49\x9f\x9e\x15\x80\x4f\x27\x44\xbf\x82\xb4\x5f\xa7\xea\xef\x57\xa8\xf8\x7b\x5a\xb5\xdf\xa9\x95\x7e\x5f\xab\xca\xaf\xbb\x16\xb4\xab\x38\xf9\x29\x39\xd7\x1d\xc5\xaa\x27\x97\x1f\x97\x98\xa1\x65\xcc\x4d\x45\xc7\x5f\x9c\x85\xbd\xb5\x5a\xe2\x76\xd3\xac\xa3\xca\xd8\xd9\x47\xab\x6b\xe5\x94\xb2\xe2\xfe\xfc\xf3\x8c\x12\xe2\x26\x66\xb9\x8f\xfd\x89\xae\x67\x7b\x7a\x82\xcf\x87\x9b\xa9\x01\xd1\xd4\xcc\x6d\x82\x37\x7c\x55\xce\xb6\xbe\x99\x82\x69\x05\xe9\xb5\x3a\xf6\x4a\x47\x63\x52\xd6\x73\x7e\x9c\x0a\xbc\xe1\xbe\xa0\x13\x2f\xf3\x49\x43\x54\x74\x3f\xba\x8f\xfd\x53\xb2\x8b\xec\x97\xe5\xdb\x24\xcb\x84\x6e\x5c\x84\xf5\x02\x31\x03\xca\xd1\xb4\x72\x8c\x4e\x89\x76\xe6\x23\x13\x0f\x38\xf9\xf8\xfd\xec\x7b\x71\xf6\x4b\xf6\xb7\x33\xa8\xd4\x87\x97\x8b\x5c\xfc\x21\xf6\x4d\x39\xe5\x59\xdb\xc0\xda\x65\x1d\xae\xdd\xea\xf6\x34\xd2\x7a\xd5\x4b\xde\x6a\xed\xda\x19\xd9\x73\x5d\xfe\xe6\x9e\x9b\xb5\x39\x97\x5a\xf3\x37\x2e\x5f\xd5\x96\xe7\x9c\x66\x39\xa6\x1f\x6e\xa6\x1b\xcf\xdc\x75\x56\x4c\x2c\x75\x12\x6a\x72\x75\x95\x5e\x88\x36\x84\x9b\xe5\xec\xc3\x64\xed\x6d\x96\x93
\xf9\x3b\x6f\xa5\x6f\x50\x73\x4d\x9a\x2b\x95\x14\xf4\x44\xb9\x7b\x76\x36\x2d\x74\xec\x7c\xef\xc0\x25\x6b\x97\x1e\x57\x3d\x4d\x4f\xa7\x7e\xca\x09\xd3\x15\x10\xae\xa6\x95\xf9\x6b\xfe\xe2\x89\xd7\x96\x66\xa0\x5c\x17\x6b\x68\x8b\xcd\x5c\x52\x50\x66\x32\xae\x37\x3b\x9a\x07\xf4\x31\xe5\x59\x3a\x95\x3e\xc5\xdc\x1c\x13\x5f\x8a\x1c\x2b\x63\x1d\x09\x0c\x44\x42\x40\x02\xbd\xa9\xe5\x18\x55\x5c\x53\xa8\x7c\x3c\xfd\xf8\xc9\x37\x0e\x95\xa0\x36\x5e\xf8\xd4\x78\xe3\x50\x93\x5c\xfc\x3b\xdb\x2e\x31\x12\xcf\x49\xaa\xfd\x5b\x0a\xa2\xa7\xe2\xa9\xcc\x77\xf6\x79\x67\xd2\xf8\x44\xd1\x57\x14\x2e\x7a\xf9\x3b\xdb\x42\x7e\xd6\x7e\x7d\xc5\xfe\x6d\xf1\xe3\x66\xe9\x4d\x56\x8b\xf9\xe6\x76\x7e\xe9\xbd\x9d\xcd\x5d\xeb\x55\xb5\x5a\xad\x27\xeb\xdb\xd5\x66\xe5\x2d\x3f\xcc\xa6\xde\xe6\x66\x71\x75\x35\x9b\xbf\xdb\x78\xcb\xe5\x62\x59\x5d\xab\x1d\x11\xc5\x91\xc5\xd2\xf1\xb3\x51\x8e\x4e\x62\xb9\xf3\xc8\x5d\x03\x70\x36\x6a\xc3\xbf\xf4\x41\x88\x84\x5c\xeb\xab\x1f\x14\xcd\xaa\x57\xbb\x42\xfb\xc4\xb7\x5d\xf1\x0a\xee\x3b\xe4\xca\x7d\xb5\x4d\xaf\x6a\x6d\x72\xe7\xec\xd6\xca\xae\x61\x99\x41\xc0\x81\xf1\xed\x32\x8a\xed\x1a\x75\x1d\x10\x80\xa8\xde\xa7\x96\x0d\x83\x08\x0b\xe1\x38\x6a\xa3\x3f\x93\x5f\x1b\x00\x6d\x03\x78\x9f\x44\xfa\x9c\x73\x14\xe8\x2c\x3a\xdb\x65\x7a\xa7\xba\x12\x01\x46\x98\x08\x40\x5b\x96\xc8\xe2\xa5\xa1\xb5\x41\x98\xe1\x55\xd3\xdf\x9f\xb7\x4e\x9f\xb3\x48\x7b\xaf\x50\x25\xda\x18\x85\x6f\x6c\xee\xe7\x28\xc4\xf7\x38\xfc\xb6\xbc\x70\x4d\x05\x72\x75\xd5\xd6\x68\x90\xae\xe2\xe9\xe2\xfa\x7a\x31\xaf\x2f\x64\xb8\x9d\xff\x34\x5f\xfc\x3c\x1f\xc2\xd2\xfb\x30\x5b\xcd\x16\xf3\xcd\xdb\xc9\xec\xca\xdc\xc0\xb5\x30\xea\xf2\xd2\x9b\x5c\x5e\xcd\xe6\xde\xc6\xfb\xcf\xa9\xe7\x5d\xba\x96\xff\x74\x31\x5f\x4f\x66\x73\x6f\xb9\xb9\x9e\xad\x56\xb3\xf9\xbb\x61\xe1\xa7\x1b\x6f\xa9\x7f\x5d\xcc\x37\x97\xde\x5c\xd7\x91\xe4\x2f\x67\xd7\xe6\x0a\xae\xc9\xed\xfa\xfd\x62\x39\xfb\x7f\xed\xd0\x4d\xeb\xb4\xed\x44\x17\x70\x4c\xdf\x7b\xd3\x9f\x32\xac\xbd\xf9\x74\xf9\xf1\x46\xbf\xf8\xc9\xfb\xe8\xe8\xbb\x06\xbd\xf2\x45\x19\xdc\xca\x9b\x2e\xbd\xf5
\xca\xde\xea\x55\x79\xf9\xf3\x64\xb6\x56\x12\xe2\xed\x62\xb9\x59\xdc\x78\x4b\x8d\x4f\xbd\x83\xd9\xf5\xb5\x77\x39\xd3\x06\x83\xb7\x5e\x7e\x1c\xc2\xcd\x62\xb5\xbe\x59\xcc\xbd\xcb\xf4\x87\xf4\xba\xd7\x27\x48\xc9\x16\x71\x38\xca\x15\x53\x2f\x41\xd9\xc0\x23\xb5\x76\x96\x65\x6a\xbf\x57\x38\xa8\xf6\xbe\x99\xa1\x1c\xa8\x54\xf8\xa9\xa5\x45\x6d\x8a\x5b\xda\xd6\xb9\xad\xb3\x71\x33\xb3\xd5\x3e\xed\xe2\xbd\xae\x0f\x5a\x81\xb7\x70\x62\xad\xad\x93\x31\x6b\xad\x2a\x7c\x59\x9f\xaf\x32\x9b\xd6\xbf\xb7\x5c\x5b\x11\xb0\x45\x8e\x7c\x7e\x95\xd1\xf3\x04\x6e\x11\xc6\xe9\xa6\x51\x96\xb3\xdf\x62\x1f\xd5\x56\x7b\xb6\x0a\xea\xb2\xf6\xc6\x9b\x5f\x6a\xd9\xb8\xf4\x94\x91\xe1\xa9\x3f\xd6\xb3\xa5\x7a\xa5\xff\xb0\xef\xa6\x4b\x6f\xa2\xa6\xaf\x2e\x49\xde\x7b\x93\xab\xf5\x7b\x3b\xf7\x39\xa3\x6a\x0b\x65\x08\xd3\xdb\xd5\x7a\x71\xad\x98\x7a\x73\x33\x59\xbf\xdf\x2c\xbd\xd5\xcd\x62\xbe\xf2\x36\x59\xbf\xd7\xb3\xf9\x66\x36\x5f\xad\x27\xf3\xa9\xb7\xda\xcc\x17\xeb\xcd\xcd\x72\x61\xd0\x75\x49\xc6\xc9\x74\x3d\xfb\xa0\xb8\xc3\x8e\xe8\x6a\x76\x3d\x5b\xab\x71\x4d\xdf\x2b\x9c\xe7\x8b\xcd\xa5\x77\x73\xb5\xf8\x78\xed\xcd\xd7\x5f\xcb\xc0\x3b\x4d\x64\x35\x12\xbb\xce\xcc\x86\x06\x0e\x08\x7a\x2a\x1c\xbf\xeb\x99\x71\xff\xee\x06\x94\xce\x5b\xed\x55\xdb\xb4\xd5\xe5\x4f\xc7\x2c\xd6\x3e\x68\x9d\xd4\x5a\xeb\xd6\x39\xad\xb5\x2e\x4d\x71\xe9\xad\xc0\xf7\x98\x13\xd9\x70\xd9\x4e\xbf\x6a\x22\x03\xe1\x69\x6b\x3b\xfd\xba\x73\x55\xbf\x37\x89\xc9\xfa\xbc\xac\x98\x63\x99\xde\x3a\x25\xb2\x8d\x6d\xb6\xba\x87\xda\x2a\x66\x3b\xf0\xf4\x75\x66\xb5\xf5\xf0\x33\xe2\x6a\xd7\x3b\x84\x19\xdd\xb1\xfa\x4e\x69\xe5\x7d\xf0\x96\xb3\xf5\xc7\x4a\xc9\xac\x59\x9c\x3f\x4f\x96\x73\xe7\x9a\x9e\xcd\xdf\x2e\xfa\x15\xc4\xba\xe0\xd7\x55\x8a\x93\xa9\x6c\xef\x0e\x11\xfe\x76\x51\x9e\x56\x59\xb9\x14\x0b\x4e\x9b\xd3\xda\xf5\x71\xfd\x27\x54\x7d\xda\x39\x9b\xab\xe2\x2d\x65\x6d\xdb\x56\xb5\x99\xf3\xda\x7d\x4c\xd3\xc5\xfc\x72\xa6\x95\x6f\x26\x20\xf3\x9f\x96\xde\x74\x31\x9f\xce\xae\x2a\x3f\xa7\xa6\x5f\xfe\xcb\xea\x76\x6a\x8c\x98\x9e\x93\x58\xc5\xcb\x65\x80\x94\xd1\x6a\x69\x51
\xc0\xb2\xa5\x55\x83\x95\xe0\x18\xc3\x8b\xda\xb4\x3f\x99\x15\xd6\xc7\xb8\x75\xa7\x37\x50\xa0\xb2\xfb\x3f\x74\xe9\x40\x14\x25\x54\x5f\x64\x9e\x5d\x48\x97\x64\xc5\x83\x2d\x37\xff\x41\x7e\x59\xb4\x71\x0d\xa1\x50\x14\x92\xa0\x8b\x47\xbc\x98\x6b\xe6\xcf\xac\x3b\x29\x3d\xdc\x85\x58\x0f\xb9\x88\xb1\x7f\xa6\x7d\x54\x62\x1c\x05\xaf\xf4\x5f\xa3\xfc\x9a\xb9\x11\xa2\xc1\x88\xe3\x98\x71\xe9\x72\xc2\xaa\x01\x8b\x74\xef\x26\x99\x3e\xa5\x2f\x3b\xb9\x09\x08\xf5\xc3\x24\xc0\x17\xf0\x47\x78\xa9\x2f\xef\x7e\x79\x01\x6b\x9e\x60\x78\x38\x60\x13\xe3\x59\x36\xd5\x76\xea\xba\x4f\x14\x1c\x0b\x71\x17\x7b\x19\x60\xdd\xb7\x58\x4d\x82\xab\x4e\x8a\xf9\xae\xc9\x0b\xb8\xb2\x79\x64\x22\x2d\xe7\x08\x88\x9a\xea\x6d\x22\x71\xda\xa5\xb9\x97\x12\x81\xcf\xc2\xd0\x16\xb9\xb0\x5d\x66\xad\x95\xbd\x81\x5b\x1c\x32\xba\xb7\xf9\xc2\x85\xd2\x6c\x53\xfd\x6d\xe1\x11\x61\xef\x11\xb4\x39\x2d\x79\xd9\xa4\x4d\x65\x29\xc3\x94\x0c\x5e\x9f\x9f\xff\xaf\xec\x73\x0b\xd9\x46\x4a\xef\x34\x69\xef\x9c\x75\xd3\x9d\xb9\x5e\x5d\xd9\x5d\x8e\x7c\xae\xc6\xc8\x52\x83\xd0\xab\x91\xbf\x2d\xd8\x12\x63\xee\x3b\x8e\xa5\xe8\x4a\x9a\x6a\x4f\x93\xaa\x86\xff\x4d\x1f\xb5\x66\x0d\x6c\x61\x51\x4a\x17\x65\x69\x12\x4a\xa1\xab\x86\x13\xfa\x82\xc2\xc9\xf5\x3a\x1f\x88\x94\x92\x2b\x8b\xf7\x93\xe7\xb7\xe7\x66\x67\x48\xbd\x3e\x3f\x77\xc0\x4c\x71\xb2\x11\x6f\x73\xf0\xa6\xa5\xf4\x1a\xf1\x3d\x96\xe6\x0e\xb3\x6a\x94\xab\xe9\x24\x84\x8e\x58\x4b\xbf\xd3\x10\x6a\x59\x13\xe9\x85\xfa\xcc\x1e\xc5\xa7\xef\x48\x49\x2f\x77\xd5\x22\xc5\xba\xda\x0d\xe2\x43\xf7\xd1\xd3\x29\xc1\x51\x98\x5d\xd8\x4d\x74\xb6\x3f\x6f\x38\x28\x40\xa2\xda\x71\x07\xa7\x8d\x6f\x8d\xea\x4d\xca\xe9\xb1\xc6\x35\x87\x75\x05\x8b\x39\x99\x4b\x32\xd8\x62\x88\x11\xcf\x18\xe5\x76\x39\x33\xa7\xa9\xfa\x61\x22\xc8\x3d\x76\x9e\x9c\x9a\x5f\x0f\xa1\x89\x22\xf5\xdc\xd5\x06\xe4\x3c\xd8\xeb\xa4\x11\xd5\x15\x53\xc7\x7a\x55\x5f\x38\x63\x61\xf5\x72\xc0\xc2\xc4\x68\xfd\x96\x39\xeb\xb2\xa5\x62\x46\xd5\xec\xb1\xcb\x7d\x76\x36\x04\xb5\x59\x4f\x96\xef\xbc\xf5\x66\x72\x75\xb5\x98\x1a\x2f\xc4\xfa\xe3\x4d\xe5\xf6\xfc\x8e\xb6
\x57\x93\xb5\xb7\x5a\xbb\xc2\x6c\x1d\x1f\xa6\xfb\x84\x7a\xac\xcd\x1d\x69\x1b\x9d\x80\xf5\xe9\x1f\x9b\x61\x9c\xfe\x5d\x3a\x8a\xaa\xfe\x5c\xd9\xbb\x74\x9f\xaa\x47\xd3\xef\x0b\x0d\x9f\x10\x6e\xbb\x4c\xc3\x63\xd6\xe2\x29\xc6\xc7\x74\x82\x20\xe3\xb6\xec\xbd\xca\xdd\x65\xa6\x12\xbf\x5b\x60\xec\xa4\x5b\xe3\x7f\x03\x05\xeb\x98\x97\x7f\x53\x45\xfb\xcf\xa5\xd3\x8a\x82\x4e\x57\x73\xd5\x0f\xac\xf8\x2d\x15\x92\xd9\x45\x68\x95\x94\x14\xca\x0d\xea\xca\xa8\xf5\x0a\xbf\x7f\x32\x65\x94\xf2\xf6\xff\x28\xa5\x7f\x37\xa5\x64\x9e\x84\x93\xe7\xb1\xd4\x2d\x27\xed\x3c\x71\x49\x44\x1c\xa2\xa3\x59\x1f\x66\xea\xd5\x9a\xc8\x99\x3b\x21\x41\x5d\x23\xd6\x3a\xaf\x76\x4b\x82\xe7\xa9\x41\xc5\xe6\x6a\x6b\x29\x04\xd9\x53\x1c\xa4\xe7\x87\x99\x13\x7c\x76\x04\xf3\xcc\x2f\x2f\x39\xd9\xd7\xae\xd2\xc9\x0e\x92\xd5\x55\x05\x70\x7b\x3b\xbb\xfc\x2e\x5d\xfd\x88\x06\xb0\x4f\x10\x47\x54\x62\xe3\x6c\xe0\x58\x6d\xfb\xf3\xfb\xc4\x20\xa1\x92\x84\xb5\xda\xd5\x74\xff\xae\x60\x06\x38\xc4\xb2\x7c\x25\x69\x2f\xbd\x97\xc4\x01\x92\xb8\x9a\x25\xd0\x40\xd4\xb6\xec\x80\x2a\xc1\x33\xb8\xcf\xa3\xfb\xda\xde\xf7\x3f\x8a\x58\x60\x2e\x24\x50\xfd\x9e\x3e\xcc\x32\xdf\xf6\x64\x9a\x12\xaf\x3e\x11\x79\x3d\x93\x4a\xa8\x13\x5a\x54\x44\xe9\x39\x70\x5a\x11\x69\x7f\x8f\xcb\x76\xea\x3d\xce\xff\x1f\x00\x00\xff\xff\xbe\x60\xa6\xb7\x96\xab\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x73\x1b\x37\xb2\xe8\x77\xff\x8a\x2e\x7b\x6f\x31\xd9\x22\x29\xd9\x9b\xc7\x5e\xdd\xb3\x1f\x18\x6a\x6c\x73\x23\x91\x3a\x24\xe5\x1c\xdf\x53\xa7\x28\x70\x06\x24\xb1\x9e\x01\x26\x00\x46\x12\x37\xe5\xff\x7e\x0a\x8f\x79\x63\x1e\x94\x9c\x64\x77\x6b\x27\x1f\x22\x73\x30\x8d\x46\xa3\xd1\xdd\xe8\x07\xf0\x0a\xa6\x2c\x3e\x72\xb2\x3f\x48\x78\x73\xfe\xe6\x0d\xbc\x63\x6c\x1f\x62\xb8\xba\x9a\x8e\x61\x12\x86\xb0\x54\xaf\x04\x2c\xb1\xc0\xfc\x1e\x07\xe3\x17\xaf\xe0\xc5\x2b\xb8\x22\x3e\xa6\x02\x07\x90\xd0\x00\x73\x90\x07\x0c\x93\x18\xf9\x07\x9c\xbe\x19\xc2\x07\xcc\x05\x61\x14\xde\x8c\xcf\xe1\x2b\xd5\xe0\xa5\x7d\xf5\xf2\xeb\xff\xf7\xe2\x15\x1c\x59\x02\x11\x3a\x02\x65\x12\x12\x81\x41\x1e\x88\x80\x1d\x09\x31\xe0\x47\x1f\xc7\x12\x08\x05\x9f\x45\x71\x48\x10\xf5\x31\x3c\x10\x79\xd0\xdd\x58\x20\x0a\x8f\x8f\x16\x04\xdb\x4a\x44\x28\x20\xf0\x59\x7c\x04\xb6\x2b\xb6\x03\x24\x0d\xc6\xea\x39\x48\x19\x5f\x9c\x9d\x3d\x3c\x3c\x8c\x91\xc6\x76\xcc\xf8\xfe\x2c\x34\x2d\xc5\xd9\xd5\x6c\xea\xcd\x57\xde\xe8\xcd\xf8\xdc\x7c\x73\x4b\x43\x2c\x04\x70\xfc\x73\x42\x38\x0e\x60\x7b\x04\x14\xc7\x21\xf1\xd1\x36\xc4\x10\xa2\x07\x60\x1c\xd0\x9e\x63\x1c\x80\x64\x0a\xe3\x07\x4e\x24\xa1\xfb\x21\x08\xb6\x93\x0f\x88\xe3\x17\xaf\x20\x20\x42\x72\xb2\x4d\x64\x89\x5c\x29\x7e\x44\x94\x1a\x30\x0a\x88\xc2\xcb\xc9\x0a\x66\xab\x97\xf0\xc3\x64\x35\x5b\x0d\x5f\xbc\x82\x9f\x66\xeb\xf7\x8b\xdb\x35\xfc\x34\x59\x2e\x27\xf3\xf5\xcc\x5b\xc1\x62\x09\xd3\xc5\xfc\x72\xb6\x9e\x2d\xe6\x2b\x58\xbc\x85\xc9\xfc\x23\xfc\x38\x9b\x5f\x0e\x01\x13\x79\xc0\x1c\xf0\x63\xcc\x15\xfe\x8c\x03\x51\x84\x34\xb3\xb7\xc2\xb8\x84\xc0\x8e\x19\x84\x44\x8c\x7d\xb2\x23\x3e\x84\x88\xee\x13\xb4\xc7\xb0\x67\xf7\x98\x53\x42\xf7\x10\x63\x1e\x11\xa1\xa6\x53\x00\xa2\xc1\x8b\x57\x10\x92\x88\x48\x24\xf5\x2f\xb5\x41\x8d\x5f\x10\xba\x63\x17\x2f\x00\x24\x91\x21\xbe\x80\x65\x42\xcf\x56\x98\xdf\x13\x1f\xbf\x00\x08\xb0\xf0\x39\x89\xd5\xc7\x17\xb0\x3e\x60\xf5\x1a\xec\x6b\xe0\x58\xb0\x84\xeb\x76\x8f\xa3\xc0\x0f\x47\x42\xf2\xc4\x97
\x23\x8a\x22\x7c\x01\x39\x10\xf3\xf2\x80\xc4\x88\xa0\xe8\x02\x24\x4f\xf0\x8b\x18\xc9\x83\x50\xdd\xee\xb1\x54\xff\x73\x74\xb5\x4b\xa8\xaf\xfe\xa5\x78\x4e\x4f\xda\x1e\x2b\x56\xdb\x31\x1e\xe9\xd1\x00\xda\xb2\x44\x02\x2a\xf4\x04\x10\x23\x8e\x22\x2c\x31\x17\x06\xea\x08\xaa\xd8\xa8\x27\x65\x13\x8b\x8c\xf9\xb1\x84\xc0\x04\x76\x49\x18\x02\xa1\x42\x6a\xa6\x66\xbb\x52\x47\x8a\xb7\x8e\x7d\xf1\xd6\x8d\xff\x51\x30\x0f\x70\x88\x25\xee\x8b\xba\x69\xfd\x7b\x62\x3a\x09\xc3\x53\x91\x0d\xc3\x5e\xe8\xc6\x9c\xfd\x0d\xfb\xb2\x0d\x5d\xe1\x1f\x70\x84\x2e\xec\xbf\x00\xe4\x31\xc6\x17\xa0\x44\x00\xdd\x97\x60\x85\xcc\xd7\x33\xfb\x0c\x60\x21\x11\xbd\x57\x82\x6a\xeb\x60\xa8\x08\xd1\xe3\x3f\xdf\xd0\x95\xea\x60\x14\x53\xa9\x91\x34\x4d\x2d\xbe\x76\x2c\xe9\x87\x56\x44\x95\xf9\xcc\x48\x17\x12\x64\xa3\x12\x67\xbf\xfc\x62\xff\xfc\xfc\xf9\x2c\x45\x4f\xfd\x9a\xfe\xfd\xf9\xf3\x99\x30\x30\xd4\xaf\x6a\x18\x9f\x3f\x97\xa0\xc5\x88\x63\x2a\x47\x3e\xa3\x4a\x5d\x61\x5e\xa5\x58\x2e\xd1\x7c\x8e\x91\xc4\xa5\xc1\xba\xc4\x5d\xf1\x0d\xc7\x28\x18\x49\x12\x61\x96\xc8\x0b\x38\x2f\xbd\xd3\xa2\xa2\xe9\xa5\xe1\xef\xfa\x5b\x43\x4d\xb6\x75\x4d\xa9\xfd\xb7\x99\xac\xec\x1f\x12\x47\x71\x88\x64\xfe\x43\x79\x78\xa3\xea\xa4\xc6\x9c\xc5\x98\x4b\x82\x45\x3e\x87\x88\x52\x66\xd5\x4a\xfe\xa3\x13\x19\xdd\x3a\x08\x88\x6a\x8b\xc2\x1b\x07\x2c\x27\x83\x15\x87\xbe\x67\x56\xab\x4c\xf2\x5e\x0b\x8d\x4a\x2b\x66\x70\x4b\x8d\x22\x4a\x94\x21\xf0\x09\x1f\xe1\x1e\x85\x09\x86\x08\xc5\x20\x0f\x48\x6a\x43\x64\x8b\x41\x60\xa9\xcc\x04\xfc\x28\x31\xa7\x28\x04\xc9\x58\x28\xca\x28\x31\x10\x92\x71\xac\x14\x29\x20\xbe\x25\x92\x23\x7e\x84\x08\x4b\x14\x20\x89\xc6\x6a\x71\x1e\x01\x71\xac\x4d\xa3\x9f\x13\xcc\x8f\xda\xdc\x50\xcd\xc5\x81\x25\x61\x00\x5b\x5c\x82\xa8\xb4\xbc\x36\xce\xe0\xe1\x80\x29\x44\x2c\x20\xbb\xa3\x52\xdb\x86\x60\x62\x0c\xd3\x90\x25\x81\x56\xb3\x0f\x24\x0c\x21\x66\x71\xa2\x66\x0a\x04\x8b\x70\x91\xe6\x25\xb0\x89\x50\x30\x06\x03\x9e\xd0\xf1\x5e\x5b\x85\x28\x26\x62\xec\xb3\x68\x30\x50\x36\xc5\x60\xa0\xf9\x9d\xee\xc7\x9f\x28\x92\xe4\x1e\x8f\x03\x7c\x3f
\x18\x68\xa6\x10\x31\xf2\xb1\x50\x63\x21\x65\xa0\x3b\x82\xc3\x00\x76\x2c\x0c\xd9\x83\x80\x1f\x93\x2d\xe6\x14\x4b\x2c\x8a\x58\x14\x60\x68\x5b\x4a\xdb\x1a\x62\xa8\x29\xc0\x93\x10\x8b\x71\x09\xe4\xb5\x22\xa6\x36\x38\x52\x03\xef\x53\x06\x76\x4c\xd8\x59\xc0\x7c\x71\x96\x08\xcc\x47\xfb\x84\x04\xf8\xac\xd8\x53\x06\x68\x4b\x28\xe2\xc7\x49\x22\x0f\x8c\x93\xbf\xeb\xb7\x3d\x18\xb0\xc2\x47\x3f\xd4\x81\xb8\x1a\x1b\x60\x56\xe0\xb4\x7f\x53\xe2\xc1\x15\x96\xca\xb8\x14\x99\xc9\x66\xbe\x85\xd2\xc7\xb0\xc3\x48\x31\x69\x91\x46\xae\x95\xa6\x07\xcd\x31\xfa\xb4\x0f\x91\x10\x7f\x4d\x84\x54\xc6\x5f\x6d\xe0\x2d\x8b\xc8\x45\x00\x37\xc0\xca\x47\xa5\x41\xcd\x76\x86\x7f\xa9\x1c\x02\xa1\x81\xfa\x02\x0b\xb5\x48\xd4\xa6\x20\x87\x67\xb9\x51\xef\x12\xfe\x56\x84\x3d\xae\x00\x07\x78\xcb\x38\x44\x29\x4f\xa4\xba\x8c\xd1\xc2\x68\x87\x20\x30\xd6\xcc\x22\x2e\xce\xce\x7c\xb5\x38\x2c\x87\x2b\xee\x3e\x33\xbc\x30\x42\x45\xb2\xa6\x5c\x44\xe8\x7e\x94\x03\xaa\xac\x17\x7c\x89\x77\x28\x09\xa5\x9b\x80\x5b\xc6\x42\x8c\xaa\xc4\xa8\x50\xf0\x36\x03\xd2\x41\xb4\x35\x4f\xb0\x83\x62\x8a\x2d\x02\x03\x20\x95\xbf\x03\x61\xb9\xbb\x46\xa9\xd2\x08\x21\x66\x21\xf1\x8f\x63\x05\xfc\x2d\x0a\xd5\x16\xce\x7c\x56\x69\xa6\x25\xc8\x16\xab\x5d\x8b\x92\x4b\x41\x06\xd5\x0f\x09\xa6\xb2\xbe\x6a\xba\xa4\xef\x54\x7f\xd7\xc4\xf4\x93\x4c\x42\x92\x00\x53\x35\xef\x98\x67\x0b\x60\x72\x33\xb3\xdd\x8e\x2b\x68\xd8\xdd\xe7\x53\xb1\xb1\x9f\x77\x23\x75\x6f\x77\xb9\x7d\x91\xd3\x9a\x7d\x4d\x22\xdc\x03\x33\xc3\xbd\x17\x10\x20\xab\xa0\x5b\xb0\xce\xe0\x16\xda\x28\x9b\x60\x41\xc3\x63\xc9\x5c\xa8\x8d\x65\x91\xc8\x38\x91\xc0\x68\x78\xd4\xba\xc7\xe0\xa8\xc6\xa4\xba\x1c\x97\xfa\xcc\x05\xeb\x88\x44\x51\x22\x15\x03\x54\xa0\xeb\xaf\x19\x7f\x02\xe1\xcd\x87\xcf\xc3\xdf\x8b\x10\x09\x95\x5d\x60\x36\xbe\xc6\x15\xa0\xf8\x57\xcd\x8e\x5a\x28\x41\x8a\xe0\xc9\x03\x33\x86\xd2\x97\x9f\xba\xcb\x0c\xee\xf3\xa7\x4e\xe3\xf8\xe4\xa9\x2b\xc2\x3e\x79\xfa\x2e\xf3\x8f\x9b\xd0\xbd\x55\x3a\x38\xe6\xec\x9e\x04\x38\x28\xbe\x4a\x27\xca\xaa\xc4\x1c\x71\x2c\xd1\xfe\x74\x54\x3c\x89\xf6\xcf\xa3\xe5
\x04\xc4\x51\x48\x1c\x8d\xf6\x98\x62\xae\xd9\x66\x47\xe8\x1e\xf3\x98\x13\x2a\xed\x0a\x27\x22\x5d\xfd\x25\xc1\x6a\xc7\x92\xfa\x31\xc6\x70\x6d\x2c\xc3\x7c\x43\x29\xb1\x2f\x8d\x95\x66\x75\x18\xf8\x8c\xee\x42\xe2\x4b\x08\x12\x35\x34\x48\x62\xc5\x38\xe2\xe4\x19\xc4\x8f\x31\xe1\xbf\x02\x8f\x7a\x19\xdc\xe7\xd1\x55\xa9\x66\x64\x57\x52\x90\x91\x68\xa8\x09\xa6\x30\x00\xb4\x93\x98\xc3\xc3\x81\xf8\x07\x20\x65\x15\x98\xea\x9d\x18\xf3\x08\x45\x98\xca\xf0\x98\x42\x3a\x99\x4e\x76\x5a\x9d\x8c\x4e\xa8\xc4\x7b\xcc\x1d\x84\x22\x54\x7e\xf7\x4d\x33\x91\xde\x65\x40\x9f\xcb\x7c\x34\x89\xb6\xda\xa3\xa6\xf6\x15\x8c\x32\xc9\x28\xf1\x51\xa8\xfd\x3d\x4a\x7c\x09\x2c\x00\xdf\x63\x7e\x84\xca\xb4\x81\xa6\xa4\xb2\x75\x2d\x7f\x29\xc3\x40\xcb\x05\xa1\x1d\x98\x42\x22\x79\xba\x5c\x20\x74\xaf\xc4\xe9\xe9\x0b\x71\x66\x3e\xec\xb4\x81\x6d\x3b\x8f\x26\x51\x13\x85\x6e\x8c\xd8\x30\xc3\xb1\x08\xa9\xcd\x56\xd1\x20\x26\x22\x93\x20\xb0\xa0\xc0\x34\x51\x87\x25\xf2\x70\x2c\x13\x4e\x0d\x14\x3f\xe1\xdc\xb0\x11\xdb\xda\xfd\x53\x15\xf0\x50\xed\x73\x66\xf3\x77\x4b\x6f\xb5\xda\xac\x97\x93\xb7\x6f\x67\xd3\xcd\xed\x7c\x75\xe3\x4d\x67\x6f\x67\xde\x65\x09\x36\xd9\x01\x65\xc0\xf1\x3d\x31\x16\x81\x00\xe4\xeb\x6d\x51\xa1\x15\xa6\x49\x54\xa4\xe3\xa8\x27\xf4\x7a\xbb\xc9\xd5\x55\xeb\xfb\xd9\x7c\xed\x2d\xe7\x93\xab\xcd\x62\x7e\xf5\xb1\x5f\xcb\xab\xc5\xe4\x72\xf3\xc3\xe4\x6a\x32\x9f\x7a\xcb\xec\x93\x10\x6d\x71\xf8\x5b\xee\xc6\xaf\x74\x87\x27\xaf\xa1\x6b\x14\x2b\xd9\x6b\xa0\xab\x1d\xba\x76\x55\x9b\x6d\xba\x30\x6b\xc9\x47\xb4\x28\x89\x19\xdf\x23\x4a\xfe\x5e\x5e\x40\xea\x1b\x65\x28\xec\x95\xc5\x8b\xf3\x2d\x74\x59\x79\x19\xa2\xe8\x5d\xba\x38\x20\xb5\xb4\x74\x80\xc2\x04\x4f\x06\xe5\x7d\xc1\x96\x84\xa1\x42\xc9\xa8\x94\x21\x08\xa6\xd8\xef\x58\xc5\x66\x47\x42\x89\xb9\xe6\x38\xbd\xc1\x80\x80\x3d\xd0\xec\x63\xff\x80\xf8\x1e\x57\x00\x1f\x41\x62\x14\x0d\x21\x73\x76\x0d\x01\xd3\x7b\xc2\x19\x8d\xf4\x3f\xf4\x72\x1f\x02\x96\xfe\xd8\xb9\x2b\x1a\x82\xe2\xd5\xb2\x98\x6d\xdc\x15\xa5\xd2\x7a\x14\x21\x8a\xf6\x98\x9b\x1d\x91\x31\x16\xe9\xde\xfc\xaa\xfe\x08
\xab\xd3\x07\x6a\x48\xcd\x60\x13\xbb\xb7\x52\x3a\x90\xec\xb5\xfe\x3b\xb3\xe4\x2d\xbb\x2d\xca\x3e\x8f\x92\x0b\xc3\xb6\xd7\x73\xe0\xf2\x57\x68\x77\x85\xcb\x5b\x51\x82\x59\x76\x5c\x30\x81\x8b\xd3\xac\x1d\x6c\x4a\x40\x1b\x3f\x84\x96\xb1\xfe\x01\xd1\x3d\x16\x46\x35\x51\x26\xb5\x7a\xe2\x59\xa4\xac\xc8\xe1\x3d\xc4\x6c\x88\x84\xbc\x36\x52\xfb\x09\xe6\xf3\x55\xe1\xeb\x5f\xc1\x86\x56\xc8\x55\x0c\x69\xab\x61\x4e\xb7\xa4\xd5\xbc\x09\x69\xb6\x2c\xc1\xd2\xca\xcb\xa7\x8c\xd8\x01\xe6\x79\x43\x9f\xa3\x08\x97\x46\x6c\xf6\x6b\x41\x26\xd5\xc7\x3a\x7c\xe6\xb3\x48\x2d\xb0\x32\x93\x13\x0a\x77\x1c\xfb\x8c\xfa\x44\xad\xd8\x3b\xad\x94\x72\x99\x58\xf5\x46\x64\x4d\xed\xe6\x9b\x33\x1f\x8b\x1a\xc4\x6c\x05\x3c\x91\xc8\x4b\x8c\x82\xe3\x73\x49\x5c\x02\xf2\x25\x09\xac\x80\xe7\x0a\x53\x8b\x68\xa2\x94\xaf\x5e\xa7\x20\x39\xda\xed\x88\x5f\x76\xf0\x14\xc9\xff\x3c\x92\x57\x5c\xa8\x9a\xfc\xcf\x25\x79\x42\xfd\xc3\x4a\xa2\x7d\x1f\xf3\xbb\x46\xea\xec\xe3\x4e\x7b\xa9\xd0\xb6\xcd\x66\x1a\xac\x35\x95\x55\x5b\xa5\x0a\xf6\x18\x90\x80\x00\xef\x08\x35\xb1\xec\xff\xb6\xb1\x7e\x33\xe2\x9b\x10\x49\x45\x2e\x8b\x49\x99\xea\xea\x6b\xf1\x3f\x5f\x59\xff\x6a\x4d\x88\x4b\xcc\x23\x71\x66\xba\x1a\xe9\xae\xc4\xd7\x45\xaf\xb3\x48\xe2\x98\xf1\xca\x82\xb9\x9b\x5c\xdd\xbc\x9f\xdc\x0d\xe1\xee\x07\x6f\xad\xfe\xaf\xe4\xea\xdd\xbb\xc9\x9d\x76\x41\x51\x66\x3d\xec\x8a\x23\x4c\x68\x1a\x07\x43\x78\x37\xd1\x86\x95\x10\x49\x84\x83\x31\xdc\x30\x21\xc8\x36\x2c\xab\x70\xa3\xf2\x2f\xe0\x6a\x72\x3b\x9f\xbe\xdf\xac\xd6\x93\x77\x5e\xd1\xae\x1a\xc2\xed\x7c\x76\x7d\x73\xe5\x5d\x7b\xf3\xb5\xfa\xe7\xcd\xd2\x33\x6d\x87\xe0\x4d\x96\x57\x1f\x37\x93\xe9\xd4\x5b\xad\xca\xa6\xa3\xc6\x76\x08\x0a\x57\x85\xc6\x10\x2e\xbd\x9b\xa5\x37\x9d\xac\xbd\xcb\x41\xab\x85\xd7\x84\x46\xa9\x51\x09\xa3\xd2\x9b\x0c\xb9\xd2\xaf\x45\x3c\x4b\x2f\x34\x9a\xa5\x5f\x14\xc6\xa5\x1f\xde\x95\xff\x99\x8f\x23\xe7\x64\x56\xf7\x04\xf7\x64\x63\x56\x73\xf9\xd6\x22\x8f\x29\xf4\xcc\x53\x56\x88\xf9\xe7\x60\x7b\x2c\x38\xdd\xe5\xc9\x28\xce\x51\xd4\x28\xac\x06\x35\x97\xca\x2e\x51\xfb\xae\x9f\x13\x14
\x6a\x0e\xd4\x7d\x1a\x09\x96\x6f\x37\xac\x05\x26\x0a\xce\x50\xf5\xec\x38\x8b\xac\x67\xce\x36\x5c\xe2\x9f\x13\x2c\xe4\xd8\x84\x02\x35\xc3\x3b\xdf\xdb\x40\xe2\x86\x04\x65\xe9\xf7\xd6\xee\x44\xf3\xa0\x64\x1a\x93\x2c\x86\x24\xb3\x88\x64\x21\x20\x99\x43\xfc\x9c\x73\xab\xfd\xf8\x74\x12\xde\x94\x22\x7b\xce\x49\xb6\xb0\x9f\x3d\xc7\x79\x80\x73\x87\x39\xa6\x7e\x79\x4f\x31\xca\xe0\x5e\x18\x71\x93\xfe\x33\x35\x52\xeb\x98\xda\x18\xd4\x45\x31\x78\x69\xc9\xa1\x67\xa5\xd2\x7f\x41\xbf\xd4\xe9\x54\x77\xe9\x57\x08\xb5\xcc\xbf\x3e\x59\x71\x96\x79\x71\x99\xee\x5c\xb9\x92\x89\x25\x7f\x99\x92\x88\xf9\x66\x76\x8b\xab\x61\x1a\xe4\xeb\xd4\xa3\x98\x51\xbd\x65\x38\x60\xbb\x15\x51\xbb\x8e\xad\xde\x2d\x11\x09\x84\x4a\xe6\xf0\x15\xc0\x4f\x07\x4c\x01\x01\xc5\x0f\x15\x3d\x9c\x77\x6d\x6c\x24\xbd\x73\x41\x14\xf0\x23\x11\x52\x47\x1f\xa9\x7e\x6d\x7c\x59\xc1\xb0\x1a\x84\x44\xe2\x48\xfd\x03\x67\x94\x25\x22\x2c\xc7\x26\x62\xcc\xb5\x26\x42\xca\xac\xc6\x4a\x31\x23\x7e\x04\x21\x71\x2c\x72\x9c\x8b\x04\xa8\xa2\x6e\x2c\x88\x12\x4c\x3b\x9c\xb5\x5a\xb6\x99\xb6\x17\xe0\xa3\x30\xd4\xf6\x5d\xd1\x34\x50\xa3\x26\x21\xae\xda\x68\xa4\x66\x9e\x59\x40\x43\xb8\x4b\x5d\x08\x9b\xdc\xb5\xa4\x34\x9b\xb1\x71\x36\x6a\xc2\x8f\x1b\x6d\xe9\x98\xdf\xad\x6d\xb3\x51\x58\x25\x02\x8b\xbb\x61\x6d\x13\x7a\x97\x70\x72\x67\x48\x75\x40\xf7\x58\x99\x43\x54\x10\x25\x36\x8a\x5b\xda\x48\x67\xee\x45\x44\x44\x48\xfa\x07\xeb\x1d\x91\x98\x06\x15\x59\xa4\x87\x7f\x01\x0b\xea\x3b\x86\x05\xec\x1e\x73\xf8\x4a\xf5\x6a\xd3\xf0\x70\x18\xa8\xdf\x77\x28\x14\xf8\x6b\xed\x9f\xe3\x58\x6d\x83\xca\xdb\xf9\x07\x06\xb1\x55\xc0\xc0\x12\xe9\xb3\x48\xa9\xdd\x0a\x78\x91\xf8\x3e\xc6\x6a\xd3\x6c\xe0\xe3\xcc\xc0\xd3\x38\x95\x20\xea\x41\x58\x27\x4f\x26\x5b\x8d\x00\xe1\x18\x1e\x90\xd0\x0c\xc6\x39\xe3\x36\x1a\x5c\xee\x6a\x87\x48\x88\x2b\x22\x53\x4f\xb8\xee\x29\xdd\x70\xef\x58\x42\x03\x6d\x3b\x2a\xb3\x85\x50\x14\x6e\x7c\x46\x8d\xd1\x38\xd6\x2d\x8d\x05\xd2\x62\x2e\x66\x63\x32\xae\x4b\x13\xce\x56\x43\xd2\x94\xb3\x1b\x42\x3d\x98\x8b\x6c\xae\xef\xcc\xac\x76\xcc\xbc\x93\x91\xcc\x97
\x5d\x9c\x95\xb7\xb4\xef\xec\xc2\xcc\xdf\x96\x29\x53\x1b\xa2\xa5\x9f\x93\x3d\x9b\x38\x1c\xd1\x32\x9f\x35\xe1\x55\xe0\x63\xc5\x01\x7a\x3e\x8a\x1b\xad\x94\x27\xd2\x2f\x86\x55\x07\x02\x8e\x62\x79\xd4\xda\x84\xe2\x87\xf0\x98\xed\xcc\x2c\x9b\x88\x31\x4c\x1a\x0d\x7f\x3d\x47\x88\x84\x49\x85\x81\x7b\x30\x84\xa5\x68\xf6\x6f\x71\x37\x1e\x9c\xaa\xc2\xd2\xc4\x98\xd3\x13\x0a\xd6\xe5\x94\x1a\x68\xdc\x11\x38\x1a\x96\x74\xc9\xd2\xa6\xee\x18\x9b\x26\x45\x28\xf3\x3d\x19\x62\x66\xc4\x77\x38\x54\x0b\x80\x9b\xb2\x09\x1a\x72\x77\x5a\x46\x0b\xbd\xbc\x86\x70\x52\x0a\x82\x3b\x97\xa7\x46\x90\x1f\x97\xd7\x23\x21\x8f\x61\x29\xfd\xa5\x66\xac\x94\x17\x4c\x96\xb9\x35\x65\xd4\xe8\x5b\xff\xe8\x1e\x69\x3d\x86\x00\x2d\x71\x04\xc7\x28\xa6\x8e\xae\xda\x86\xb3\xc2\xd2\x48\xcd\x08\x3d\x92\x28\x89\xd2\x10\x02\xdb\xe9\xac\x2d\x2c\xa4\xd5\x17\x18\xa9\xcd\xa0\x5d\x6c\x69\x92\x64\x8d\xdc\x6a\x61\x70\xec\xe3\x8a\xf7\xba\x40\x82\x86\x29\x46\x9c\xa3\x2a\xa2\x4d\x43\x6b\x9d\x9f\xf7\x4c\xc9\x51\x2d\x2b\x08\x55\x9b\xd4\xac\x67\x33\x0e\xb3\x8d\x35\x2d\x12\x4a\xa4\x1a\x29\x7e\xc4\x7e\xe2\xc8\x37\x81\x9c\x9d\x53\x2f\x46\x35\x6b\xc4\xa6\x1a\x63\x1a\x8c\xb4\xa0\xa9\x59\x66\x79\xa3\x90\x08\x69\x17\x9f\xfa\xb3\xd2\x86\x48\x1c\x35\xf2\xaf\x73\x01\x74\x2d\xe9\x46\x7a\xd5\x13\xf2\xf2\x67\x04\x24\x2a\x7b\x12\xcc\xd3\xb4\x76\xcd\x83\xf8\xde\xf9\x7b\xdb\xec\x56\x86\x60\x17\x21\xdf\xd7\xb1\x85\x9a\x81\x3b\xe1\xfb\xc4\x78\x72\xac\x01\x87\xa9\xe4\xc7\x98\x11\x2a\x6d\x3c\x9b\xf9\x9f\x30\x37\x63\x19\x0c\xdc\x20\x01\xa6\xd7\x97\xda\xd0\x54\xc2\x8c\xd8\x9d\x19\x11\xda\x21\x9b\x3a\xec\xc7\xf0\x01\x71\xa2\xf3\xe8\xf2\x8d\x04\xfc\xe1\xab\x0f\x93\xe5\x66\x3e\xb9\xf6\xbe\x6e\x00\x8d\x38\x06\xfc\x18\x23\x65\x4f\x65\x29\x48\x05\x66\x1c\x0c\x44\xd1\xe5\xae\xed\x06\x04\xf7\xb6\xaf\x06\xa0\xbe\x16\x39\x4a\xf3\x28\x39\x13\xde\xa7\x66\x44\x86\x99\xd2\x45\xc6\x92\x53\xf6\xbf\x8d\x68\xd8\xf8\x67\x03\xcc\x84\x1a\x67\xb4\x95\xf0\x85\x91\x81\x38\x52\x89\x1e\x53\x65\x87\x85\x8f\xe2\x34\x5e\x81\x20\x60\x49\x33\xa2\x7f\xf8\xc3\x10\x08\xbe\x80\x3f\x14\xc0
\x8d\xc1\xb3\x10\x0a\x84\x34\x0e\x70\xac\xac\xc8\x6d\x4e\xb0\x61\x03\x58\x8e\xf7\x88\x07\xa1\xf5\x2e\x3f\x1c\xb0\xae\x56\x50\x03\x4e\x09\x67\x36\x11\xba\x72\x81\x32\x39\x2e\xa4\xf8\x35\x80\x4c\xc3\x0b\x8e\xcc\x3f\x89\xc4\x27\x71\x46\xa8\x5a\x7a\xa3\x00\x49\x34\xb2\x35\x1c\x26\xad\x4b\xcb\x91\x91\xcf\xa2\x08\xd1\x60\x84\x2c\x47\xe6\x59\xba\x67\xaf\x78\x42\x29\xa1\xfb\x11\xca\x5a\x11\x3a\x42\x23\x71\xc0\x61\x38\x68\x59\x0d\x1d\xd2\xa4\xd8\xb4\x5d\xa6\x98\xa7\x41\xb2\x98\xa7\x45\x3f\x96\x7b\xca\xa4\x4c\x63\x5b\x3b\xc8\x2f\x22\x07\xa6\x06\x56\x0f\x51\xe0\x65\x2b\xdf\x40\x1f\xc3\x9c\x49\x2b\xd1\x2d\xb7\xea\x12\x1f\x4d\x75\xcd\xe3\x0d\xc3\xac\x88\x0c\xf0\xe6\xeb\xe5\xc7\x9b\xc5\x6c\xbe\xee\x94\x11\x0d\x10\x3b\x24\x47\x1f\x19\xd1\x00\xb9\x59\x72\x74\xcb\x88\x06\x90\x2d\x92\xa3\x5b\x46\x34\xcb\xab\x46\xc9\xd1\x5b\x46\x34\xc0\x6e\x90\x1c\xfd\x65\x44\x03\xdc\xba\xe4\xf8\xb7\x8c\xf8\x72\x32\x02\xd3\xfb\x2f\x22\x1f\x3c\x7a\xdf\x2d\x1b\xae\x88\x30\xe6\x5d\xbe\x5a\x32\x26\xd0\x96\x83\xd0\x25\x5c\xe5\x45\xe7\x5e\xcc\xbf\x17\xd1\x1b\x0c\xbf\x0a\x4d\x3a\xcc\xbf\x26\x62\xb5\xd9\x81\xe6\x19\xd5\x7d\x9d\xf9\xd3\x6e\x12\x9a\xa7\xea\x68\x77\x8f\xb2\x95\xb5\xa0\xd3\x11\x5f\x7d\x1a\x36\xb0\xc5\x10\xa2\x8b\x25\x9a\xc4\xb8\x79\xae\x13\xa1\xa5\x2a\x82\xe9\x66\x76\xe9\xcd\xd7\xb3\xb7\x33\x6f\x69\xbc\x49\x11\xb5\xf5\xa8\xf8\xd1\xc7\x38\x80\x3f\xbd\xf9\xfe\xbb\x3f\xeb\xcc\x0b\xe4\x4b\xcc\x45\x33\x64\xed\x91\xfb\xf2\x04\xfa\xa0\xc0\xf6\xa5\xd0\xe0\x14\x3d\xd5\x4a\xa3\x5c\x87\xc5\x1c\xdf\x13\x96\xe4\x51\x43\xf7\x1a\x34\x8b\xaf\x15\x66\xbe\x7d\xd3\x65\x2f\xf4\x08\x9c\x25\xd2\x3d\x83\xa2\x9f\x19\x6d\x01\x3f\xc5\x98\x6e\x85\xd8\x5b\x5d\x56\x14\x63\x07\xd0\x2f\xa8\x34\x0b\x78\x3e\x49\x75\xb6\x82\xad\x98\xde\xb6\x1a\x41\x0b\xda\x97\x2f\x87\x99\x13\x37\x75\x34\x84\x98\xee\xe5\xa1\x15\x22\x11\x76\x29\x6d\x8f\x4a\xe9\xba\xf5\xa1\x79\xcc\x02\x48\xd3\x61\x5b\x24\x92\x12\x6a\x7a\xd5\xad\xaa\x51\xa5\xf2\x53\x68\xd3\xbd\x3e\x5b\xc5\x34\x34\xac\xcf\x0e\x04\x4e\x92\xef\xfd\x00\x96\x3d\x3f\xba\x75\xe6\xb9\x72\xad
\xa7\x81\x30\x64\x68\x93\x8c\xa7\x13\xbe\xa5\x45\x1f\x95\xa2\x1e\x81\x7d\x8e\xe5\x8f\xf8\xb8\xc4\xbb\xf6\x96\xbd\x67\x08\xea\xb3\xb4\x2a\xf4\xd2\xf7\xcb\x93\xa6\xea\x84\x0e\x2a\x4e\xbb\x10\xfb\x52\xa8\x5d\x8d\x86\x60\x44\x63\x5e\x88\xef\xca\x2e\x77\x3d\x26\xce\xac\xa3\x6b\x06\x15\xb8\x36\xd1\xcf\x76\x55\xd8\x6d\x37\xa4\xcf\xc8\x22\xd8\xd1\xac\xef\xb4\x43\x36\xf5\xdd\xed\x7a\xab\xce\xf4\x71\x4e\x7e\x8f\xef\x5a\xdc\xe5\xb4\x60\x71\xd8\x99\x22\xdd\xb3\xa2\x1e\xe7\xa4\xd8\x28\x3e\xfc\x62\x60\x6d\x74\xa9\x30\x90\x2e\xd6\xb1\xd4\x28\xe0\x90\x2a\x5e\x10\x0a\x41\x1b\x70\x1f\xbb\x92\x03\xcc\x17\x3a\x0d\x20\xef\xb2\x57\x7f\x64\x57\xef\x12\x41\x40\x76\x5a\x45\x65\x35\x66\x5d\x9c\x06\x1d\x71\xfc\xa6\xa7\x18\xdf\x37\x64\x4c\x03\xfb\xbd\x27\xb6\x39\xde\xef\x7a\xee\xeb\x75\x63\x4d\xcf\xf3\x18\xb3\x5e\x61\xd6\xf4\xd4\x52\x2b\x5c\x5c\x95\xce\x90\xc5\xbf\xcf\x7c\x00\x4c\x8d\xfd\x32\x30\x31\xbb\x41\xa6\x44\x6c\x66\x9e\xc9\xbf\x32\x51\x7d\x77\x44\xc3\xfd\xe8\x2c\xbc\x9a\x18\xfb\xbd\x58\xa4\x3f\xa1\xbb\x38\x45\xfb\x94\xda\xf6\xbd\x2d\xbc\x50\xad\x8c\x70\x7a\xe7\xa1\x66\xd3\xe7\x82\xe8\x76\x79\x95\xca\xa0\x4c\x0f\x19\x8c\xd4\x92\x4c\x33\xf9\xd2\x37\x0d\xa3\x5d\xe2\x3d\x11\x92\x1f\xd5\xa4\x5e\x1a\x47\x59\x3f\x07\x89\xcf\xa8\x8f\x63\xa9\xff\xb0\x3a\xf0\x4c\x77\x2e\x5c\xd6\x5c\xf3\x76\xf1\x54\x3a\x35\x6e\x11\x4b\x64\x2a\x6e\x09\xf3\x8d\x46\x96\x35\x08\x48\xe9\xd8\xcb\xf9\x6a\x73\x35\xf9\xc1\xbb\x72\x31\xa2\xce\x4e\xfc\x22\x2e\x8d\x9b\x5a\x9e\xa3\x13\xe5\xd4\xa9\xa1\x3b\x56\xb6\x35\x7e\x8c\x99\xc0\x46\x97\x97\x3d\x19\xb0\xa0\xa1\xbb\x6b\x50\xcb\xcc\x84\xc7\x14\x9c\x74\x3f\x92\x0d\xdc\xa8\xaf\x9c\x0e\xa6\xb3\xc8\x6c\x81\x1b\x20\x86\x44\x48\x4c\x4d\xfe\x8e\x4e\xc0\x51\x2b\x9f\xef\x90\xda\x4d\x7e\x75\x3e\xd6\xff\x7d\x9d\xfa\x63\xcb\x04\x97\xac\x01\xa6\xda\x6f\xfb\x3e\x36\x99\x22\x7a\x73\xc7\x22\x22\x75\x52\x10\x32\x98\xdb\x70\x65\xba\xf5\xf2\x0f\x4c\x60\x5a\x4b\x2e\x28\xcc\x17\x12\x36\x74\x5d\x41\xe1\xc0\x59\xb2\x37\x79\x30\x37\x8b\xe5\xda\x69\x0f\x37\xc0\x4c\xa5\x5f\x71\x40\x96\x1c\xd0\x24\xbe
\xfe\xa9\x1d\x4a\xcd\xac\xda\xcf\x96\xcb\x08\xa5\x00\x75\xef\xae\xba\x55\x48\x5b\x80\xdc\x39\xc0\x4a\x44\x59\xe1\xd1\x77\xdf\x74\x53\x60\xbb\xf2\xac\x9b\x29\x17\x6a\xce\xeb\xe7\x57\x54\x9f\x28\xf3\x27\xdd\xa3\x90\x04\xb0\x9e\xde\x14\x39\x7a\x08\xe7\xf0\x1f\x39\xec\x8d\x7e\xf5\x1f\xad\x10\xbf\xfb\xf6\xdb\x3f\x7d\xd7\xac\x2c\x7f\x7f\x57\xdc\x6c\x57\x4c\xc8\x4e\x53\x48\xcc\x4f\x47\x5b\xbb\x19\x73\x26\x99\xcf\xc2\xd6\x91\x9a\x23\x0b\xf4\x31\x73\xec\x01\x67\xa5\x5a\x88\x63\x78\xa9\x74\xd1\xeb\x97\x7a\x47\xf4\xf2\xf0\xc6\x7f\xe9\x22\x48\xaa\xf7\x5b\x05\x77\xcb\x4a\xa9\xe5\x6a\x5a\x70\xed\x8d\x3b\x16\x55\x3b\x94\xb2\x76\x9f\xb2\x28\x4e\x24\xce\x7a\x4e\x37\x64\x26\x26\xae\x73\x35\x89\xe8\xf2\x69\xd7\x0e\x40\xe9\x52\xe1\x42\x32\x8e\xf6\xf8\x2c\x56\x86\x91\x62\x75\x39\xba\x67\x61\x12\x61\xf1\x2a\xa3\xa8\xdb\x47\xd3\x2d\x14\xfc\x38\x99\x05\x61\x0b\x83\xb6\x1d\x85\xe1\x20\xb6\x5d\xe0\x06\x6a\x63\xf3\x12\x55\x2f\xb1\xc9\xe2\xc2\x22\xf3\x7d\x4d\x6f\x6e\xf3\xa3\x73\xb4\x8e\x90\x32\xc4\x01\xb0\x36\x61\x44\x99\x04\x96\x48\x41\x02\x5c\x4c\xa3\x69\x9a\x04\x73\x48\x4d\xd7\xc0\x3b\xfc\x17\xfd\x92\xa1\xaa\x40\x3b\x16\x7b\x35\x43\x5f\x23\xda\x8f\x96\x03\x65\x7a\x40\x84\x23\xc6\x8f\x26\x5b\xfd\xe6\xd6\x14\x3d\x9a\x82\x0e\xed\x8f\x67\x12\x5f\xb4\x04\x64\xd5\xc3\x14\x98\xec\x9b\x74\xa1\x2b\x85\x9b\x02\x1c\x0c\x5e\x0f\x06\x43\x18\x0c\xde\xa8\xff\xa9\xae\x06\x83\x6f\x06\x83\xb6\x4d\x83\x3d\x0e\x07\xbe\xd1\x40\xec\xd2\x11\x80\x24\x84\x18\x09\x09\x6f\xde\x11\x35\x71\x06\x7b\x63\x0a\x99\x8e\xdb\x10\xdd\x59\xe7\x66\xac\xeb\x4e\x4c\xfc\x54\xe7\x22\xdb\x37\x83\xc1\xcf\x09\xa2\x92\xc8\xe3\x60\x00\x9f\xfe\x2c\xcc\x14\xb4\x40\x4c\x57\xe4\x9e\xc8\x43\xb2\xd5\xd5\x32\xf9\xe2\x2c\xfe\xb9\x0d\xd9\xf6\x2c\x42\x42\x62\x7e\x26\xa4\xae\xa5\x3c\x13\xdc\x3f\xfb\xf4\x67\xbd\x86\x51\x4c\x22\xe4\x1f\x94\x1c\x38\x9e\xc5\x9f\xf6\xea\x87\xac\x26\xf3\x2c\x45\x6a\xbc\x67\xae\xa5\x6b\xd6\xf7\x35\x4b\xe8\x17\xb2\x72\x3f\x14\x00\x76\x0b\x3a\xd3\x5a\xc9\xfb\x48\x7d\x91\xe7\x9a\xe7\x31\x72\x73\xf6\xa7\xc9\x4b\xff\x17\x34\xb5
\x3a\xe9\xf5\xcc\x20\xde\xc8\x50\xf6\x06\x35\xba\xe2\xfb\x18\x74\x19\x8c\x2f\x6f\x60\x5c\x77\xa0\x07\xcd\x2e\x38\xf5\x95\x7b\xc3\x81\xa4\x31\x37\xda\xad\x0c\xb5\xee\x0d\x03\xe6\x3a\x40\x8f\x54\x01\xd7\xf1\x40\x25\xea\x2d\x54\x18\x5c\x0c\xb4\x87\xae\x15\xa6\xf5\xc0\xfc\xe7\x95\x85\x2c\x86\x40\xb2\xed\x57\x88\x77\xd2\xa4\x2b\xeb\xdc\x75\x6d\x21\x32\xa5\x87\x1e\x88\x68\x77\x40\x6d\x31\xdc\x99\xe2\x3a\xf1\x73\x78\x67\xce\xdf\x4d\x73\x34\xf3\x18\x9c\x25\x83\x5d\x54\xb5\xca\x68\x07\x50\x74\x8f\x48\x68\x0e\x91\x13\x85\x1e\xce\xfe\x3b\x05\xfe\x3f\x77\xee\x02\xf1\x56\xc0\x8c\xba\xe8\xa0\x2b\xca\x9b\xcb\xbd\x55\xb7\xda\x1c\x89\x8e\xea\x4f\x9f\x51\x8a\x7d\x39\xe2\x49\x73\x5f\xbf\xbf\xc5\x5b\x74\x07\x13\xbb\x81\xce\xeb\x2d\x52\xef\x03\xb2\x53\x52\x16\x5f\x59\x52\xaa\x97\xef\x44\x9f\x7e\xc0\x9a\xe7\x80\xd6\xf4\x85\x53\x24\xb9\x00\x54\x6a\x48\x6b\xc3\xd7\x75\xa4\x02\xd1\x60\xcb\x1e\x4b\x1b\x6a\xc9\xe0\xc0\x84\xac\xa4\xd6\xba\x8b\x32\xa1\x50\x98\xe9\xfd\x97\x37\xbd\x5d\xcf\x16\xf3\x8d\x37\xff\x30\x5b\x2e\xe6\xd7\xde\x7c\x5d\xae\xd0\x74\x37\x79\xe7\xcd\x5f\xd7\xb3\x0a\x1b\xdb\xbe\xa9\xea\xc2\x6a\x71\x26\x98\x5a\xca\x2e\x74\x7a\x7e\xa1\xb0\xeb\xdf\xf4\x4d\xa9\x69\xfd\x94\x0d\xf8\x6d\xf3\xe6\x6b\xa7\x6e\x40\x73\xca\xbc\x3d\x16\xa1\x3d\x5b\x9e\x3b\xaa\xce\x4f\x43\xc9\x51\x72\x5e\x43\x6a\x6d\xf2\xbf\x7f\x4e\x6c\x98\x26\xc7\x29\xe5\xc6\xd9\xae\x52\xd4\x94\xfa\x9e\x6a\xc4\x22\x32\x73\x40\xa1\x44\x32\x25\x02\xcd\xa9\x3b\xf9\x29\x50\x5b\x24\xcc\x21\xdd\xc5\xea\x33\xd5\x71\x79\xec\xc2\x47\xd5\x82\xc1\x8e\xe9\xac\x86\xad\x0c\x80\x93\x96\xb6\xfb\x9b\x72\xc4\xd1\x34\x71\x1c\x9c\xd3\x94\x18\xdf\x66\x36\x44\xe8\x71\x66\x15\xc8\x54\xe9\x53\x97\x90\xee\x72\xfd\x74\xb9\x7c\xaa\x66\x44\xa5\x4b\xc7\x17\x95\x43\x61\xaa\xe5\x10\xd5\xda\x07\x5b\x17\xa1\x89\xe0\xa8\x51\xcd\x1f\x6b\x3c\x1c\x50\xb5\x26\x42\x93\x82\xd0\xdf\x9c\x14\x95\x2e\x3b\x49\x41\xe8\xaf\x4e\x0a\x5b\x65\x3c\xf1\x7d\x17\x11\xfa\xaf\xfc\x55\x09\x4e\x1b\x47\x3b\x0f\x2b\x99\x4d\xae\x53\x54\x00\x19\x18\x80\x84\x60\x3e\x41\x69\x32\x71\x5d\x52
\x16\x84\x46\x1e\xec\xb5\xa7\x49\xad\xf3\x7f\x64\x00\x39\xb6\xe7\x96\xda\x33\xa9\xf4\x01\x90\xb2\xbe\x7b\x4a\xcf\x83\x33\x19\xa2\x79\x81\x9b\xde\xe4\x06\x45\x67\x05\x92\xa5\x83\xf6\x8b\x18\xd5\xa0\x1e\x90\xb0\xc7\x24\xe4\x09\xcc\xc3\xf2\x20\xb4\x2c\x4b\xcf\x04\xcd\xcf\x02\x0d\x9c\xc7\x8b\x42\x75\x7c\xee\x0a\x99\xe6\xd0\x5c\x31\x0c\x37\x43\xd1\x59\xeb\x14\x66\x91\x36\xac\xa6\xaf\xf4\x32\x3d\x88\xfa\xc9\xbc\xb3\x36\x00\xda\x98\xe6\x1a\x3d\x02\xb2\x4e\x47\x7d\xec\xdc\x2e\x0d\x72\xda\x43\xe3\x25\x53\xa3\x89\x19\x35\x47\xed\xa7\xce\x9f\x32\x4d\xac\xc9\xfb\xe4\x42\x28\x63\x2d\xb6\xaa\xdc\x89\x39\x94\x9d\xed\xd2\xc6\x7a\x0f\x8d\x3e\x15\x4d\x7a\xc9\x0a\x55\x59\xff\x44\x75\x4d\xee\xe1\xb7\x17\x35\x39\x77\xc0\xed\x3b\x5b\xbd\x03\x59\xfd\x1c\xa6\xb2\xf2\xcb\xf8\x8a\xa7\x15\xa8\x4f\x70\x19\xdb\xf1\xf7\x82\x54\x62\x0a\xb5\x51\x73\x6c\xbc\x2c\x13\x88\xf2\x65\x1e\x99\x6c\x6f\xf2\x71\x28\xa9\xe3\xd8\x17\x7f\xf8\xc2\xdb\xb8\x5d\xc3\xa1\xcc\x07\xf6\x60\x39\x58\x01\x28\x8c\x4b\x7b\x1b\x5d\x47\xf1\x54\xe9\xdb\x91\xb3\xd6\x9a\xbb\xd4\xed\x14\xc9\xe8\xd7\xe5\xd9\x6d\x76\x9d\x81\xeb\x60\xc4\xf6\x59\x69\x4e\xfa\xf8\xcf\xab\x5c\x4a\x59\xa2\x29\x4a\xea\xa3\xcb\x86\x6a\x7b\xef\xb7\xb8\xd5\xa1\x54\xed\x9c\x4d\x2e\xa3\x82\xa9\x79\x75\x4d\x72\x36\xfe\x36\x4f\xec\x97\x66\x16\x78\x16\xc3\xb4\x00\x55\xac\x94\xa5\x5f\xa5\xd9\x51\x17\xf9\x81\x29\x17\xbf\xa4\xe3\x6d\xce\x8e\x3a\xc1\x0f\x09\x27\xf9\x22\xa1\xcb\x1f\x09\x4f\x70\x76\x9c\xd4\xbe\x5f\xe2\x4d\x29\xe1\xe6\xe7\xf0\xac\x55\x72\xa5\x8f\xd5\xf7\x29\x79\x1b\xbc\x2f\xbf\x75\xe2\x48\xee\xd8\x31\xd2\x78\x20\x1c\x9b\x39\xf3\xb4\x25\x2c\x9e\xaa\x3d\x5a\x72\xd7\x7a\xe9\x8c\x96\xef\xcb\x8e\x1b\x9b\x23\x56\x30\x55\xb3\x5c\xd3\xa2\xe0\x4f\x0f\x53\x6c\xd4\x11\x44\x58\x3d\xd3\xb7\xa0\xaa\x16\x6c\x4c\x23\x8c\xa6\xf7\xb6\x92\xa8\x4e\x79\x5e\xd5\xe8\xce\x66\xed\xde\xf4\x67\x2a\x05\x6b\x42\x5f\xb3\xa0\x33\xd2\xd9\x95\xf4\xd0\x2f\xe5\xa1\x76\xd6\x76\xd6\x7f\x3f\x05\x32\x98\x19\x34\x72\x4e\x40\xe9\xb9\xdb\x11\x0b\x30\x6c\x89\xcc\x6e\x10\x68\xf5\xfa\xa6\xa7\x6e\xe8
\x60\x0d\x6c\x8f\x29\x29\xc6\x85\xc2\x1a\x93\x36\xb8\xc5\xf2\x01\x63\x0a\xe7\xe7\xe7\xe7\x2d\x89\x3c\xea\x39\xff\xfe\xfb\xef\xe1\x2b\xe6\x4b\x14\x7e\x3d\x4c\x21\xea\x12\x14\x06\xe7\xdf\x7d\xf3\xcd\x18\x2e\x09\xc7\xbe\x64\x9c\x60\x51\x88\x04\xb4\x80\x8c\x91\x3c\x64\x77\xa8\xa0\xdd\x0e\xeb\x23\x90\xd2\x88\xba\x75\x7f\x98\xd8\xa5\x80\x3f\x82\x22\x0f\xa7\x28\x0c\x8f\x4d\x35\xcb\xa0\x93\xad\x92\x08\x89\x4f\x8a\x6a\xe7\x6f\xde\xbc\xc9\x9d\x45\xb1\xbe\x5c\x4d\x6f\x15\xe8\x11\x28\xa3\xa3\xbf\x63\x6e\xcf\xaf\x1b\xc3\x1f\x5b\x40\xae\x6d\x1d\x6a\x9e\x61\xe9\x98\x22\x1d\x91\x4c\xa7\x69\x0c\x2b\xd6\x86\xa4\x6a\xac\x49\x99\x01\x34\xf3\x61\x97\x7a\xc8\xd8\x27\xc0\x8f\xc8\x97\xe1\x51\x99\x0b\x3a\xa0\x72\x88\x58\xdb\xfc\xd0\x24\xc2\x9c\xf8\x90\x9e\x93\x91\x16\xb3\x84\x18\x05\x6a\x9a\xd4\x60\x15\x5a\x11\x56\x90\xa3\x38\xc4\xe2\x42\x31\x76\x1b\x1f\xa9\x2e\x41\xcf\x3b\xfa\x0b\x7f\x78\xfc\x7a\xa8\xab\xf8\xd4\x84\x17\x98\x01\x18\x87\x6f\x5f\xbf\x86\xaf\xb6\x48\xe0\xd1\xeb\xf3\xaf\xdb\x8c\x10\x65\x16\x1b\xb0\xdf\x7d\x73\x0e\x5f\x25\x7f\xe1\x0f\xc3\xfd\x5f\x78\x01\xb2\xfe\x3d\x87\xfc\xcd\xeb\xef\x4e\x85\xfc\xfd\xb7\xdf\x1a\xc8\x8f\x0a\xf4\xe3\x90\xfd\x85\x97\x50\x57\xaf\xb3\x0e\x5a\x20\x7e\xf3\x7f\xff\x54\xe8\x1a\xfe\x68\xe3\x0a\xfa\x2c\xa7\x2d\x36\x57\x1f\xda\x13\xe1\x35\xad\x75\xf8\x08\x58\xec\x3a\xd6\xa4\xf8\x68\xa1\x6e\xd8\xdd\x1c\x7a\x43\x42\xc3\x3a\x43\x08\xc9\x27\x0c\x3b\xf1\x8e\xb3\x24\xce\xab\x78\x38\x16\x2e\x5f\x40\x61\xa2\x4c\x4c\xcb\x20\x90\xcb\x0a\x81\xe5\xb8\x03\xe9\x16\xa0\xa5\xe1\x9c\x84\x74\xdb\xea\x14\x7a\xa3\xae\xc6\x33\xac\xe1\xeb\xa7\x1b\x1c\x85\x78\x53\x01\x52\x87\xf1\xf5\x14\x3b\x5f\x41\xec\x27\xa2\x67\x3b\x48\x68\x21\xaf\xaa\x10\xb6\xd4\x72\xc6\xe6\x87\x22\x4d\x9e\x16\x32\x3c\xe8\x83\x8b\xb5\x2f\x9c\x88\x42\xee\xfe\x10\x38\x0e\xf5\xc1\xc7\x8a\x55\x0b\xf1\xe8\xb1\xde\xf3\x6d\x94\xc4\x6c\x5b\x04\xe5\xac\x2f\x05\xf7\x13\x3e\xe6\xb5\x72\xc2\xa4\xd9\x6a\xac\xed\x95\x28\x92\xc1\x0e\xcb\xd6\xe8\x6c\x63\xe5\x4a\xc6\xa0\xb1\x09\xfe\x9a\x5e\xa4\x2d\xc6\x68\x63\xad\x5d\xce\x43
\x86\x64\x59\xc0\x34\xb5\x60\xf4\x89\x76\x7a\xae\xb5\x86\xb0\x61\xd5\x0e\x99\x7a\x34\x51\xbf\x34\xcb\x0d\x59\x0d\xa3\x0b\x76\x3a\x13\xdc\xff\x11\xb6\x0c\xfd\x6b\xdb\xba\x2d\xcf\x76\xb6\xee\x57\x57\x34\xd2\x14\x6c\x53\xe1\x3d\xab\x89\xa2\x56\x2b\xcc\x3c\x7d\x13\x50\xe1\x84\x24\x54\x70\x25\x39\xb4\x18\x64\xe6\x71\x9b\x65\x46\x61\x3b\x4c\x31\x6d\xaf\x74\x80\x04\xcd\xed\xc3\x72\x2a\x6a\xd1\x02\x7b\xad\xf9\xb4\xa8\x58\xbb\x6b\x31\x66\x3b\x38\xb7\x35\xa0\x4a\x66\x0e\x0b\x29\x07\x83\xdc\x89\x6c\x50\x6e\x3f\x99\x26\x7f\x94\x90\x70\xda\x5c\x4d\x96\x55\x27\xc4\x2e\xcb\xcb\x61\x5f\x75\xc2\xec\x63\x7f\xd5\xad\xac\x4e\xb0\xfd\xac\xb0\x9a\xad\xd5\x09\xb7\xa7\x2d\x96\x5b\x5c\x9d\x10\x4f\xb3\xc8\x3a\x73\x65\x20\x33\xf7\xbe\xb4\x5d\x66\x9e\xa7\x58\x67\x9d\x40\x15\x1a\xa7\xd9\x68\xdd\x20\x9f\x64\xf4\x74\x2f\x81\x92\x25\xd7\x66\xaf\xb5\xd5\x5d\x9b\x1d\x53\x3f\x31\xda\xab\xe2\xad\x5a\x15\xd3\x2e\xeb\xa1\xbd\x00\x33\xb3\x5c\xb4\xd2\x2d\x55\x62\x76\xd2\xa7\xdf\xc1\x20\xe9\xd3\xb3\xfc\xef\xe9\x84\xe8\x57\x8d\xf6\xeb\x94\xfc\xfd\x0a\xe5\x7e\x4f\x2b\xf5\x3b\xb5\xcc\xef\x4b\x95\xf8\x75\x17\x82\x76\x95\x25\x3f\x25\xe1\xba\xa3\x52\xf5\xd4\xc2\xe3\x12\x2f\xb4\x0c\xb9\xa1\xdc\xf8\xb3\xb3\xa4\xb7\x56\x45\xdc\x6e\x98\x75\xd4\x17\x3b\xfb\x68\x75\xac\x9c\x50\x50\xdc\x9f\x79\x9e\x51\x3c\xdc\xc4\x29\xf7\xb1\x3f\xd1\x95\x6c\x4f\x4f\xed\xf9\x70\x33\x35\x20\x9a\x9a\xb9\xed\xef\x86\xaf\xca\x79\xd6\x37\x53\x30\xad\x20\xbd\x4d\xc7\xde\xe4\x68\xec\xc9\x7a\xb6\x8f\x53\x7b\x37\x5c\x13\x74\xe2\x1d\x3e\x69\x74\x8a\xee\x47\xf7\xb1\x7f\x4a\x5e\x91\xfd\xb2\x7c\x89\x64\x99\xd0\x8d\x2b\xb0\x5e\x1a\x66\x40\x39\x9a\x56\x4e\xcf\x29\xd1\xce\x7c\x64\x82\x01\x27\x9f\xba\x9f\x7d\x2f\xce\x7e\xc9\xfe\xfe\xec\x52\xbf\x7d\x78\xb9\xc8\xc5\x1f\x62\xdf\x14\x52\x9e\xb5\x0d\xac\x5d\xd0\xe1\xda\x65\x6e\x4f\x23\xad\x57\xbd\xdb\xad\xd6\xae\x9d\x91\x3d\xd7\x9d\x6f\xee\xb9\x59\x9b\xe3\xa8\x35\x7f\xe3\xf2\x0d\x6d\x79\xb6\x69\x96\x5d\xfa\xe1\x66\xba\xf1\xcc\x15\x67\xc5\x94\x52\x27\xa1\x26\x57\x57\xe9\x3d\x68\x43\xb8\x59\xce\x3e\x4c\xd6\xde
\x66\x39\x99\xbf\xf3\x56\xfa\xe2\x34\xd7\xa4\xb9\x92\x48\x41\x4f\x94\xbb\x67\x67\xd3\x42\xc7\xce\xf7\x0e\x5c\xb2\x76\xe9\x29\xd5\xd3\xf4\x50\xea\xa7\x1c\x2c\x5d\x01\xe1\x6a\x5a\x99\xbf\xe6\x2f\x9e\x78\x5b\x69\x06\xca\x75\x9f\x86\x36\xd7\xcc\xdd\x04\x65\x26\xe3\x7a\xa7\xa3\x79\x40\x9f\x4e\x9e\x25\x52\xe9\xc3\xcb\xcd\xe9\xf0\xa5\xa0\xb1\xb2\xd4\x91\xc0\x40\x24\x04\x24\xd0\x3b\x5a\x8e\x51\xc5\x2f\x85\xca\xa7\xd2\x8f\x9f\x7c\xd1\x50\x09\x6a\xe3\x3d\x4f\x8d\x17\x0d\x35\xc9\xc5\xbf\xb1\xed\x12\x23\xf1\x9c\x74\xda\xbf\xa6\x20\x7a\x2a\x9e\xca\x7c\x67\x9f\x77\xa6\x8b\x4f\x14\x7d\x45\xe1\x7e\x97\xbf\xb1\x2d\xe4\x47\xec\xd7\x57\xec\x5f\x17\x3f\x6c\x96\xde\x64\xb5\x98\x6f\x6e\xe7\x97\xde\xdb\xd9\xdc\xb5\x5e\x55\xab\xd5\x7a\xb2\xbe\x5d\x6d\x56\xde\xf2\xc3\x6c\xea\x6d\x6e\x16\x57\x57\xb3\xf9\xbb\x8d\xb7\x5c\x2e\x96\xd5\xb5\xda\x11\x4e\x1c\x59\x2c\x1d\x3f\x1b\xe5\xe8\x24\x96\x3b\x83\xdc\x35\x00\x67\xa3\x36\xfc\x4b\x1f\x84\x48\xc8\xb5\xbe\xf1\x41\xd1\xac\x7a\xa3\x2b\xb4\x4f\x7c\xdb\xcd\xae\xe0\xbe\x3a\xae\xdc\x57\xdb\xf4\xaa\xd6\x26\x6b\xce\xee\xab\xec\x1a\x96\x19\x04\x1c\x18\xc7\x2e\xa3\xd8\xae\x51\xd7\xd1\x00\x88\xea\x4d\x6a\xd9\x30\x88\xb0\x10\x8e\x43\x36\xfa\x33\xf9\xb5\x01\xd0\x36\x80\xf7\x49\xa4\x8f\x37\x47\x81\xce\x9f\xb3\x5d\xa6\x57\xa9\x2b\x11\x60\x84\x89\x00\xb4\x65\x89\x2c\xde\x15\x5a\x1b\x84\x19\x5e\x35\xf1\xfd\x79\xeb\xf4\x39\x8b\xb4\xf7\x0a\x55\xa2\x8d\x51\xf8\xca\x66\x7d\x8e\x42\x7c\x8f\xc3\xaf\xcb\x0b\xd7\xd4\x1e\x57\x57\x6d\x8d\x06\xe9\x2a\x9e\x2e\xae\xaf\x17\xf3\xfa\x42\x86\xdb\xf9\x8f\xf3\xc5\x4f\xf3\x21\x2c\xbd\x0f\xb3\xd5\x6c\x31\xdf\xbc\x9d\xcc\xae\xcc\xc5\x5b\x0b\xa3\x2e\x2f\xbd\xc9\xe5\xd5\x6c\xee\x6d\xbc\xff\x9a\x7a\xde\xa5\x6b\xf9\x4f\x17\xf3\xf5\x64\x36\xf7\x96\x9b\xeb\xd9\x6a\x35\x9b\xbf\x1b\x16\x7e\xba\xf1\x96\xfa\xd7\xc5\x7c\x73\xe9\xcd\x75\x05\x49\xfe\x72\x76\x6d\x6e\xde\x9a\xdc\xae\xdf\x2f\x96\xb3\xff\xdf\x0e\xdd\xb4\x4e\xdb\x4e\x74\xe9\xc6\xf4\xbd\x37\xfd\x31\xc3\xda\x9b\x4f\x97\x1f\x6f\xf4\x8b\x1f\xbd\x8f\x8e\xbe\x6b\xd0\x2b\x5f\x94\xc1\xad\xbc\xe9
\xd2\x5b\xaf\xec\x65\x5e\x95\x97\x3f\x4d\x66\x6b\x25\x21\xde\x2e\x96\x9b\xc5\x8d\xb7\xd4\xf8\xd4\x3b\x98\x5d\x5f\x7b\x97\x33\x6d\x30\x78\xeb\xe5\xc7\x21\xdc\x2c\x56\xeb\x9b\xc5\xdc\xbb\x4c\x7f\x48\x6f\x79\x7d\x82\x94\x6c\x11\x87\xa3\x5c\x31\xf5\x12\x94\x0d\x3c\x52\x6b\x67\x59\xa6\xf6\x7b\x85\x83\x6a\xef\x9b\x19\xca\x81\x4a\x85\x9f\x5a\x5a\xd4\xa6\xb8\xa5\x6d\x9d\xdb\x3a\x1b\x37\x33\x5b\xed\xd3\x2e\xde\xeb\xfa\xa0\x15\x78\x0b\x27\xd6\xda\x3a\x19\xb3\xd6\xaa\xc2\x97\xf5\xf9\x2a\xb3\x69\xfd\x7b\xcb\xb5\x15\x01\x5b\xe4\xc8\xe7\xd7\x17\x3d\x4f\xe0\x16\x61\x9c\x6e\x1a\x65\xd9\xfa\x2d\xf6\x51\x6d\xb5\x67\xab\xa0\x2e\x6b\x6f\xbc\xf9\xa5\x96\x8d\x4b\x4f\x19\x19\x9e\xfa\x63\x3d\x5b\xaa\x57\xfa\x0f\xfb\x6e\xba\xf4\x26\x6a\xfa\xea\x92\xe4\xbd\x37\xb9\x5a\xbf\xb7\x73\x9f\x33\xaa\xb6\x50\x86\x30\xbd\x5d\xad\x17\xd7\x8a\xa9\x37\x37\x93\xf5\xfb\xcd\xd2\x5b\xdd\x2c\xe6\x2b\x6f\x93\xf5\x7b\x3d\x9b\x6f\x66\xf3\xd5\x7a\x32\x9f\x7a\xab\xcd\x7c\xb1\xde\xdc\x2c\x17\x06\x5d\x97\x64\x9c\x4c\xd7\xb3\x0f\x8a\x3b\xec\x88\xae\x66\xd7\xb3\xb5\x1a\xd7\xf4\xbd\xc2\x79\xbe\xd8\x5c\x7a\x37\x57\x8b\x8f\xd7\xde\x7c\xfd\xa5\x0c\xbc\xd3\x44\x56\x23\xb1\xeb\xcc\x6c\x68\xe0\x80\xa0\xa7\xc2\xf1\xbb\x9e\x19\xf7\xef\x6e\x40\xe9\xbc\xd5\x5e\xb5\x4d\x5b\x5d\xfe\x74\xcc\x62\xed\x83\xd6\x49\xad\xb5\x6e\x9d\xd3\x5a\xeb\xd2\x14\x97\xde\x0a\x7c\x8f\x39\x91\x0d\x77\xec\xf4\xab\x23\x32\x10\x9e\xb6\xb6\xd3\xaf\x3b\x57\xf5\x7b\x93\x93\xac\x4f\xca\x8a\x39\x96\xe9\x65\x53\x22\xdb\xd8\x66\xab\x7b\xa8\xad\x62\xb6\x03\x4f\xdf\x62\x56\x5b\x0f\x3f\x21\xae\x76\xbd\x43\x98\xd1\x1d\xab\xef\x94\x56\xde\x07\x6f\x39\x5b\x7f\xac\x14\xcb\x9a\xc5\xf9\xd3\x64\x39\x77\xae\xe9\xd9\xfc\xed\xa2\x5f\x29\xac\x0b\x7e\x5d\xa5\x38\x99\xca\xf6\xee\x10\xe1\x6f\x17\xe5\x69\x95\x95\xbb\xb0\xe0\xb4\x39\xad\xdd\x1a\xd7\x7f\x42\xd5\xa7\x9d\xb3\xb9\x2a\x5e\x4e\xd6\xb6\x6d\x55\x9b\x39\xaf\xdd\xc7\x34\x5d\xcc\x2f\x67\x5a\xf9\x66\x02\x32\xff\x69\xe9\x4d\x17\xf3\xe9\xec\xaa\xf2\x73\x6a\xfa\xe5\xbf\xac\x6e\xa7\xc6\x88\xe9\x39\x89\x55\xbc\x5c\x06\x48\x19\xad
\x96\x16\x05\x2c\x5b\x5a\x35\x58\x09\x8e\x31\xbc\xa8\x4d\xfb\x93\x59\x61\x7d\x8c\x5b\x77\x7a\x03\x05\x2a\xbb\xf6\x43\x57\x0d\x44\x51\x42\xf5\xfd\xe5\xd9\x3d\x74\x49\x56\x36\xd8\x72\xe1\x1f\xe4\x77\x44\x1b\xd7\x10\x0a\x45\x21\x03\xba\x78\xb8\x8b\xb9\x5d\xfe\xcc\xba\x93\xd2\x63\x5d\x88\xf5\x90\x8b\x18\xfb\x67\xda\x47\x25\xc6\x51\xf0\x4a\xff\x35\xca\x6f\x97\x1b\x21\x1a\x8c\x38\x8e\x19\x97\x2e\x27\xac\x1a\xb0\x48\xf7\x6e\x92\xe9\xf3\xf9\xb2\x33\x9b\x80\x50\x3f\x4c\x02\x7c\x01\x7f\x84\x97\xfa\xce\xee\x97\x17\xb0\xe6\x09\x86\x87\x03\x36\x21\x9e\x65\x53\x55\xa7\xae\xf8\x44\xc1\xb1\x10\x24\xb6\x77\x00\xd6\x7d\x8b\xd5\x0c\xb8\xea\xa4\x98\xef\x9a\xbc\x80\x2b\x9b\x44\x26\xd2\x4a\x8e\x80\xa8\xa9\xde\x26\x12\xa7\x5d\x9a\xeb\x28\x11\xf8\x2c\x0c\x6d\x7d\x0b\xdb\x65\xd6\x5a\xd9\x1b\xb8\xc5\x21\xa3\x7b\x9b\x2c\x5c\x28\xca\x36\x75\xdf\x16\x1e\x11\xf6\xfa\x40\x9b\xd0\x92\x17\x4c\xda\x3c\x96\x32\x4c\xc9\xe0\xf5\xf9\xf9\xff\xc9\x3e\xb7\x90\x6d\x98\xf4\x4e\x93\xf6\xce\x59\x31\xdd\x99\xe8\xd5\x95\xda\xe5\x48\xe6\x6a\x8c\x2c\x35\x08\xbd\x1a\xf9\xdb\x82\x2d\x31\xe6\xbe\xe3\x40\x8a\xae\x8c\xa9\xf6\x1c\xa9\x6a\xec\xdf\xf4\x51\x6b\xd6\xc0\x16\x16\xa5\x74\x51\x96\x26\xa1\x14\xba\x6a\x38\x9b\x2f\x28\x1c\x58\xaf\x93\x81\x48\x29\xb3\xb2\x78\x2d\x79\x7e\x69\x6e\x76\x7a\xd4\xeb\xf3\x73\x07\xcc\x14\x27\x1b\xee\x36\x47\x6e\x5a\x4a\xaf\x11\xdf\x63\x69\xae\x2e\xab\x46\xb9\x9a\xce\x40\xe8\x88\xb5\xf4\x3b\x07\xa1\x16\x3a\x4e\xef\xd1\x67\xf6\x10\x3e\x7d\x35\x4a\x7a\xa7\xab\x16\x29\xd6\xd5\x6e\x10\x1f\xba\xcf\x9c\x4e\x09\x8e\xc2\xec\x9e\x6e\xa2\x53\xfd\x79\xc3\x11\x01\x12\xd5\x0e\x3a\x38\x6d\x7c\x6b\x54\x6f\x52\xce\x8d\x35\xae\x39\xac\xcb\x57\xcc\x99\x5c\x92\xc1\x16\x43\x8c\x78\xc6\x28\xb7\xcb\x99\x39\x47\xd5\x0f\x13\x41\xee\xb1\xf3\xcc\xd4\xfc\x56\x08\x4d\x14\xa9\xe7\xae\x36\x20\xe7\x91\x5e\x27\x8d\xa8\xae\x98\x3a\xd6\xab\xfa\xc2\x19\x0b\xab\x9f\xc4\x52\x98\x18\xad\xdf\x32\x67\x5d\xb6\x54\xcc\xa8\x9a\x3d\x76\xb9\xcf\xce\x86\xa0\x36\xeb\xc9\xf2\x9d\xb7\xde\x4c\xae\xae\x16\x53\xe3\x85\x58\x7f\xbc\xa9\x5c\x9a
\xdf\xd1\xf6\x6a\xb2\xf6\x56\x6b\x57\x98\xad\xe3\xc3\x74\x9f\x50\x8f\xb5\xb9\x23\x6d\xa3\x13\xb0\x3e\xfd\x63\x33\x8c\xd3\xbf\x4b\x47\x51\xd5\x9f\x2b\x7b\x85\xee\x53\xf5\x68\xfa\x7d\xa1\xe1\x13\xc2\x6d\x97\x69\x78\xcc\x5a\x3c\xc5\xf8\x98\xce\x0e\x64\xdc\x16\xbc\x57\xb9\xbb\xcc\x54\xe2\x77\x0b\x8c\x9d\x74\x59\xfc\x6f\xa0\x60\x1d\xf3\xf2\x2f\xaa\x68\xff\xb1\x74\x5a\x51\xd0\xe9\x52\xae\xfa\x51\x15\xbf\xa5\x42\x32\xbb\x08\xad\x92\x92\x42\xad\x41\x5d\x19\xb5\xde\xdc\xf7\x0f\xa6\x8c\x52\xde\xfe\xb7\x52\xfa\x57\x53\x4a\xe6\x49\x38\x79\x1e\x4b\xdd\x72\xd2\xce\x13\x97\x44\xc4\x21\x3a\x9a\xf5\x61\xa6\x5e\xad\x89\x9c\xb9\x13\x12\xd4\x35\x62\xad\xf3\x6a\xb7\x24\x78\x9e\x1a\x54\x6c\xae\xb6\x96\x42\x90\x3d\xc5\x41\x7a\x72\x98\x39\xbb\x67\x47\x30\xcf\xfc\xf2\x92\x93\x7d\xed\x06\x9d\xec\x08\x59\x5d\x52\x00\xb7\xb7\xb3\xcb\x6f\xd2\xd5\x8f\x68\x00\xfb\x04\x71\x44\x25\x36\xce\x06\x8e\xd5\xb6\x3f\xbf\x46\x0c\x12\x2a\x49\x58\x2b\x5c\x4d\xf7\xef\x0a\x66\x80\x43\x2c\xcb\x37\x91\xf6\xd2\x7b\x49\x1c\x20\x89\xab\x59\x02\x0d\x44\x6d\xcb\x0e\xa8\x12\x3c\x83\xfb\x3c\xba\xaf\xed\x35\xff\xa3\x88\x05\xe6\x2a\x02\xd5\xef\xe9\xc3\x2c\xf3\x6d\x4f\xa6\x29\xf1\xea\x13\x91\xd7\x33\xa9\x84\x3a\xa1\x45\x45\x94\x9e\x00\xa7\x15\x91\xf6\xf7\xb8\x6c\xa7\xde\xe3\xfc\xdf\x00\x00\x00\xff\xff\x9e\x66\xfb\x3d\x8d\xab\x00\x00"), }, "/storage": &vfsgen۰DirInfo{ name: "storage", @@ -840,9 +840,9 @@ var Assets = func() http.FileSystem { "/vpcaccess/beta/connector.yaml": &vfsgen۰CompressedFileInfo{ name: "connector.yaml", modTime: time.Time{}, - uncompressedSize: 6695, + uncompressedSize: 6692, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x5f\x93\xe2\xb8\x11\x7f\xe7\x53\x74\xed\x3c\xcc\xdd\x16\xd8\x2c\x77\x75\x95\x22\x4f\x84\x61\x2f\x54\x58\xa0\x80\xdd\xab\x7d\xca\x0a\xbb\x6d\x2b\x2b\x4b\x3e\xfd\x19\xa0\xb6\xf8\xee\x29\x59\x36\xb6\x31\xcc\x0c\xb3\xb9\xe4\x32\x2f\x83\xa5\x56\xeb\xf7\xeb\x7f\x6a\xe9\x0e\xc6\x22\x3b\x48\x1a\x27\x1a\x06\xfd\xc1\x00\x7e\x15\x22\x66\x08\xb3\xd9\xd8\x83\x11\x63\xb0\xb2\x53\x0a\x56\xa8\x50\x3e\x62\xe8\x75\xee\xa0\x73\x07\x33\x1a\x20\x57\x18\x82\xe1\x21\x4a\xd0\x09\xc2\x28\x23\x41\x82\xe5\x4c\x17\x3e\xa1\x54\x54\x70\x18\x78\x7d\xf8\xc1\x0a\xbc\x29\xa6\xde\xfc\xf8\xd7\xce\x1d\x1c\x84\x81\x94\x1c\x80\x0b\x0d\x46\x21\xe8\x84\x2a\x88\x28\x43\xc0\x7d\x80\x99\x06\xca\x21\x10\x69\xc6\x28\xe1\x01\xc2\x8e\xea\x24\xdf\xa6\x50\x62\x71\x7c\x2e\x54\x88\xad\x26\x94\x03\x81\x40\x64\x07\x10\x51\x5d\x0e\x88\x76\x88\xed\x5f\xa2\x75\x36\xf4\xfd\xdd\x6e\xe7\x91\x1c\xad\x27\x64\xec\x33\x27\xa9\xfc\xd9\x74\x3c\x99\xaf\x27\xbd\x81\xd7\x77\x6b\x3e\x72\x86\x4a\x81\xc4\xdf\x0d\x95\x18\xc2\xf6\x00\x24\xcb\x18\x0d\xc8\x96\x21\x30\xb2\x03\x21\x81\xc4\x12\x31\x04\x2d\x2c\xe2\x9d\xa4\x9a\xf2\xb8\x0b\x4a\x44\x7a\x47\x24\x76\xee\x20\xa4\x4a\x4b\xba\x35\xba\x61\xae\x12\x1f\x55\x0d\x01\xc1\x81\x70\x78\x33\x5a\xc3\x74\xfd\x06\xfe\x36\x5a\x4f\xd7\xdd\xce\x1d\xfc\x36\xdd\xfc\x7d\xf1\x71\x03\xbf\x8d\x56\xab\xd1\x7c\x33\x9d\xac\x61\xb1\x82\xf1\x62\xfe\x30\xdd\x4c\x17\xf3\x35\x2c\xde\xc3\x68\xfe\x19\xfe\x31\x9d\x3f\x74\x01\xa9\x4e\x50\x02\xee\x33\x69\xf1\x0b\x09\xd4\x1a\xd2\x79\x6f\x8d\xd8\x00\x10\x09\x07\x48\x65\x18\xd0\x88\x06\xc0\x08\x8f\x0d\x89\x11\x62\xf1\x88\x92\x53\x1e\x43\x86\x32\xa5\xca\xba\x53\x01\xe1\x61\xe7\x0e\x18\x4d\xa9\x26\x3a\x1f\x69\x91\xf2\x3a\x94\x47\x62\xd8\x01\xd0\x54\x33\x1c\xc2\xa7\xe5\x78\x14\x04\xa8\x94\x3f\x16\x9c\x63\xa0\x85\xec\x00\x84\xa8\x02\x49\x33\xab\x64\x08\x9b\x04\x2b\x31\x38\x89\x81\x44\x25\x8c\x0c\xb0\x03\xb0\xef\x85\x01\xeb\x29\x2d\x4d\xa0\x7b\x9c\xa4\x38\x84\xba\x3a\x37\x9d\x10\xd5\xa3\x24\x1d\x42\x44\x98\xc2\x4e\x46\x74
\xa2\x2c\x92\x18\xb5\xfd\x77\x61\xd7\xc8\xf0\xc0\x7e\xd9\x30\xcc\xfd\x18\xa3\x8d\xbe\x48\xc8\x34\x27\x08\x64\x2b\x8c\x06\xd2\xd8\x0c\x20\x23\x92\xa4\xa8\x51\x2a\xa7\xb7\x07\x6d\x48\xf6\xaf\x8c\x9e\x21\x68\x69\xb0\x18\x6c\x80\x18\x41\x64\x18\x03\xca\x95\xce\x63\x5d\x44\x67\x9b\xd9\xa0\x3b\xbc\x14\x7d\x2e\xfc\xe7\xc2\x1f\x22\x43\x8d\x2f\x25\xe0\xa4\xff\xf7\x78\x47\x8c\xdd\x0a\x99\xb1\x17\x82\xce\xa4\xf8\x17\x06\xfa\x29\xc8\x2a\x48\x30\x25\xc3\xe2\x0b\x40\x1f\x32\x1c\x82\xad\x14\x3c\x6e\xe8\x62\x22\xc8\xfd\xfc\x1d\xca\x18\x55\x2f\xce\x0e\x2b\x7b\x21\xbc\x52\xc2\x0f\xff\x8f\xe4\xed\x19\x23\x38\x72\x9d\x83\x74\xa2\x05\xde\x13\x9b\x72\x69\x51\xcd\xce\x23\xce\x15\x1e\x1a\x9e\x98\x29\xff\xdb\xb7\xe2\xe7\xf1\xe8\x97\x10\xed\x68\xf9\xfb\x78\xf4\x83\x52\x8b\x1d\xb7\x64\x8e\xc7\x86\xbe\x8c\x48\xe4\xba\x17\x08\x6e\x4f\x37\x94\xe7\x76\xab\xca\x5d\x20\x91\x68\x6c\x50\xbe\x58\x0b\xeb\x53\x12\x49\xd8\xd3\x34\x45\x61\xf4\x10\xfa\x8d\xb9\xbc\x82\x5c\x9b\x74\xa1\xde\x9e\x75\x46\x15\xdb\x4b\x9e\x2d\xbe\x9d\xcf\x4e\x1f\x4d\x3a\xbd\x73\x57\x66\x52\x64\x28\x35\x45\x55\x79\xae\xb0\x19\x86\xcb\xc2\xd0\xd5\x54\x89\x80\x48\x49\x0e\xb5\x51\x87\x3a\x16\xcd\x03\xa3\xd2\x50\x13\xb5\x36\x59\x70\x76\x68\x58\xb2\x95\x14\x0b\xa3\x33\xa3\x41\x70\x76\xf0\x60\x66\x93\x41\x44\x27\xc7\x83\x51\xf6\xbc\xb4\x67\xe1\xc9\xbf\x5e\x03\xcd\x57\xb3\x45\xc9\x51\xa3\xea\xd1\x34\x35\xda\xb6\x12\xad\x0d\x1d\x68\x9b\x69\x3d\xc7\xca\xfe\xac\xcd\x53\x8d\x69\x83\xfb\x85\x0c\x69\x19\xe0\xa2\x04\xcd\xc6\x34\x94\x2b\xc2\x63\x6c\xdb\xb2\xa5\xed\xcc\x98\xd3\xe5\x69\xf1\x35\x6b\xdd\xdb\x1a\x22\xad\x84\x35\x13\xe5\x1a\x25\x27\x0c\x48\x18\xda\xd6\x04\x15\xe8\x84\x68\x88\x04\x63\x62\xa7\x60\xf5\x7e\x0c\x3f\xff\xf2\xd3\xc0\x76\x85\x79\x2c\x78\x0d\x2a\x93\x3d\x49\x33\x6b\xae\x2f\xef\xfa\xde\xbb\x9f\x06\x5e\xdf\xeb\xfb\x83\xbf\x7c\xf1\xee\x6f\x35\x71\x19\x6b\xb7\x73\x9e\x35\xa3\xb4\x45\xd8\xf2\x2d\xb5\x9f\x7a\xab\x5a\x13\x73\x13\xcc\x94\x04\x09\xe5\xb8\xb1\xc8\x6e\x46\xfa\xa1\x5a\x7c\x0d\x6c\x21\x92\x2b\xb4\xfe\xf9\xf4\x01\xa6\xe5\xa9
\x98\x77\x75\xec\x60\xc3\xb9\x0a\x65\x78\xc0\x88\x18\xa6\x81\xaa\x86\x67\x70\xd0\x4b\x69\x20\xc5\xed\x04\xf7\xe5\x86\x17\x72\xd9\x86\x4b\x8c\xb2\x36\xee\x4e\x9d\x7c\xe6\x97\x9f\x9f\xa2\x5e\xa9\xbd\xce\x7d\x4f\x53\x93\xc2\x23\x61\xa6\x08\xce\x62\x85\x6d\xe4\x89\xd1\x42\x05\x84\x59\xfa\xb1\x14\x26\xab\xd9\xa3\x99\x77\xdf\x95\xea\x29\xd9\x6f\x12\x29\x4c\x9c\x64\x46\xff\x67\x0d\x50\xe9\x7d\xce\x02\xfa\x24\x59\x5e\x9e\x4e\x84\xac\x29\x3e\x6c\x33\x55\x77\x3c\x0c\xfa\xfd\x6e\xc3\x06\x29\xd9\xdb\xf1\x77\xfd\x7e\xff\x76\x0b\x50\xfe\x87\x44\x40\x4d\xed\x55\xfe\x94\xff\x09\x22\x80\xf2\x3f\x26\x02\xea\x7a\x9f\xb3\xc0\x4d\x11\x40\x78\x68\x51\x37\x6c\xe0\xa2\xe2\x66\xf2\x39\xd4\x9b\x0b\xdb\xbc\xea\x22\x5a\x8c\x36\xb5\x6a\x9b\xab\xb7\xf8\x2d\x21\x67\x36\xf8\x72\x6a\xd1\xde\xd6\x1a\xb3\xb7\xf5\x6e\xec\xed\x97\xdb\x79\xa0\xde\x09\xf9\xf5\x15\x54\xdc\xc2\x6b\x6c\x2c\x53\x77\x3d\xf9\xb4\x1c\x97\xbb\xbc\xba\x9f\x90\x18\xa1\xc4\xf3\x44\xeb\x9d\xec\x65\xbb\xa3\x34\x33\x1a\xfd\x36\x2c\x80\x88\x22\x0b\x87\xf5\x16\x0e\xca\xb6\xe7\x76\xde\xcb\x46\xef\x77\xd1\x8b\x85\xee\xef\x3e\x43\x6f\x60\xcf\x84\x09\xcb\xcf\x94\x70\x12\xa3\xf4\xdb\x48\x2f\x9b\x02\xdc\x85\x07\xb9\x3e\xdb\x5f\x69\xa2\x5f\x11\xe4\x6b\xbb\xec\xd2\xb4\x5b\x7e\xba\x83\xe4\x72\x13\x6e\xd2\x9b\x1b\xd9\xfb\x46\x27\x9b\xeb\x29\xb3\xdf\x46\x1b\x71\x2f\x31\xb5\xb3\x7f\x29\x94\xa2\x5b\x86\xae\x60\x9e\xf5\x9f\xeb\xcd\x68\x33\xf9\xe7\xc7\xf9\x7a\x39\x19\x4f\xdf\x4f\x27\x0f\x5d\x58\x4d\x46\x0f\x9f\xbb\x30\x5e\x4d\x46\x9b\xe9\xfc\xd7\x2e\x3c\x4c\x66\x13\xf7\x6b\xb2\x5a\x2d\x56\x5d\xf8\xb8\x7c\xc8\xa7\x6e\xee\xdf\x00\x90\x9b\xb4\xe9\xc8\x16\x82\xc6\x6c\x0e\xa6\x31\x52\xe2\x6a\x0c\x96\x10\x1b\x83\x39\xda\xc6\x48\x09\xbc\xf2\xb2\xd9\x72\xbc\x90\x08\x8d\x0b\xd1\x25\x37\xe7\xeb\x5e\xe2\xe7\x73\xc1\x56\xc2\x38\x08\xf9\x2b\x64\x42\x83\xc4\xde\xd5\x13\xe1\x9e\x56\x9d\x47\xcf\xdf\xd6\x5e\x51\x48\x2e\x5d\xca\xe0\x42\x21\x87\xa7\x6e\x24\xcf\x15\xf4\x16\xbb\x7b\x47\xde\xd5\xf3\x1f\x24\x32\xa2\xe9\x23\x76\xf3\xb7\xe3\xc8\x30\x76\x80\xdf\x0d\x61\x34\xa2
\x18\xfe\xe8\xc1\xc4\x8b\x3d\xa0\x79\x20\x9f\x29\x05\xf7\xf0\x53\x18\x4a\x21\x8b\x66\x94\x7f\xb5\x87\x57\xa2\x75\xa6\x86\xbe\x1f\xb8\x02\xe8\xc5\xf9\x2b\x38\xc9\xa8\xf2\x02\x91\x96\xc3\xfe\xe3\x3b\xbf\xba\xe2\x97\x37\x7c\x5f\x62\xec\xae\xf7\xee\xc7\xd1\x77\x1b\xd8\x0a\xaa\xfc\x6f\xee\xc3\x52\x3c\xb6\xe0\xb8\x93\x56\x4a\x5b\xe7\x28\xb7\xc9\xe8\xaa\x5d\xfe\x14\x8e\x2c\x84\x9d\x30\x2c\x84\x2d\x0e\xa1\xae\xe7\xbe\x65\xce\x17\x39\x0f\x9e\xa9\x85\x70\xe5\x34\x58\x9f\xe8\xb4\xed\x79\xa5\x12\xc2\xb5\x6a\x08\xd5\xa9\x31\x0d\x5f\x1f\x31\xcb\x52\xc5\x53\x61\x53\x08\xd5\x32\xa2\xca\x12\xdc\x53\xa5\x95\x07\xd3\x28\x8f\x22\x85\xba\xeb\xec\x9e\x5d\xa8\xf7\xe0\x3a\x1c\xa2\x94\x49\xdd\x23\xd8\xd6\x65\x55\xfd\x8c\xaa\xb6\xa8\x7a\x27\xf7\x32\x93\x3f\x84\xa0\x6a\xeb\xdc\x11\x05\x54\x29\x83\xa1\xf7\x5f\xf2\xe8\x4b\x4f\xb8\x73\xdf\xfe\x3b\x00\x00\xff\xff\x44\x8b\xc2\xbb\x27\x1a\x00\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x5f\x93\xe2\xb8\x11\x7f\xe7\x53\x74\xed\x3c\xec\xdd\x16\xd8\x2c\x77\x75\x95\x22\x4f\x84\x61\x2f\x54\x58\xa0\x80\xdd\xab\x7d\xca\x08\xbb\x6d\x2b\x2b\x4b\x3e\xfd\x19\xa0\xb6\xf8\xee\x29\x59\x36\xb6\x31\xcc\x0c\xb3\xb9\xe4\x32\x2f\x83\xa5\x56\xeb\xf7\xeb\x7f\x6a\xe9\x0e\xc6\x22\x3b\x48\x1a\x27\x1a\x06\xfd\xc1\x00\x7e\x15\x22\x66\x08\xb3\xd9\xd8\x83\x11\x63\xb0\xb2\x53\x0a\x56\xa8\x50\x3e\x62\xe8\x75\xee\xa0\x73\x07\x33\x1a\x20\x57\x18\x82\xe1\x21\x4a\xd0\x09\xc2\x28\x23\x41\x82\xe5\x4c\x17\x3e\xa3\x54\x54\x70\x18\x78\x7d\xf8\xc1\x0a\xbc\x29\xa6\xde\xfc\xf8\xd7\xce\x1d\x1c\x84\x81\x94\x1c\x80\x0b\x0d\x46\x21\xe8\x84\x2a\x88\x28\x43\xc0\x7d\x80\x99\x06\xca\x21\x10\x69\xc6\x28\xe1\x01\xc2\x8e\xea\x24\xdf\xa6\x50\x62\x71\x7c\x29\x54\x88\xad\x26\x94\x03\x81\x40\x64\x07\x10\x51\x5d\x0e\x88\x76\x88\xed\x5f\xa2\x75\x36\xf4\xfd\xdd\x6e\xe7\x91\x1c\xad\x27\x64\xec\x33\x27\xa9\xfc\xd9\x74\x3c\x99\xaf\x27\xbd\x81\xd7\x77\x6b\x3e\x71\x86\x4a\x81\xc4\xdf\x0d\x95\x18\xc2\xf6\x00\x24\xcb\x18\x0d\xc8\x96\x21\x30\xb2\x03\x21\x81\xc4\x12\x31\x04\x2
d\x2c\xe2\x9d\xa4\x9a\xf2\xb8\x0b\x4a\x44\x7a\x47\x24\x76\xee\x20\xa4\x4a\x4b\xba\x35\xba\x61\xae\x12\x1f\x55\x0d\x01\xc1\x81\x70\x78\x33\x5a\xc3\x74\xfd\x06\xfe\x36\x5a\x4f\xd7\xdd\xce\x1d\xfc\x36\xdd\xfc\x7d\xf1\x69\x03\xbf\x8d\x56\xab\xd1\x7c\x33\x9d\xac\x61\xb1\x82\xf1\x62\x7e\x3f\xdd\x4c\x17\xf3\x35\x2c\x3e\xc0\x68\xfe\x05\xfe\x31\x9d\xdf\x77\x01\xa9\x4e\x50\x02\xee\x33\x69\xf1\x0b\x09\xd4\x1a\xd2\x79\x6f\x8d\xd8\x00\x10\x09\x07\x48\x65\x18\xd0\x88\x06\xc0\x08\x8f\x0d\x89\x11\x62\xf1\x88\x92\x53\x1e\x43\x86\x32\xa5\xca\xba\x53\x01\xe1\x61\xe7\x0e\x18\x4d\xa9\x26\x3a\x1f\x69\x91\xf2\x3a\x94\x47\x62\xd8\x01\xd0\x54\x33\x1c\xc2\xe7\xe5\x78\x14\x04\xa8\x94\x3f\x16\x9c\x63\xa0\x85\xec\x00\x84\xa8\x02\x49\x33\xab\x64\x08\x9b\x04\x2b\x31\x38\x89\x81\x44\x25\x8c\x0c\xb0\x03\xb0\xef\x85\x01\xeb\x29\x2d\x4d\xa0\x7b\x9c\xa4\x38\x84\xba\x3a\x37\x9d\x10\xd5\xa3\x24\x1d\x42\x44\x98\xc2\x4e\x46\x74\xa2\x2c\x92\x18\xb5\xfd\x77\x61\xd7\xc8\xf0\xc0\x7e\xd9\x30\xcc\xfd\x18\xa3\x8d\xbe\x48\xc8\x34\x27\x08\x64\x2b\x8c\x06\xd2\xd8\x0c\x20\x23\x92\xa4\xa8\x51\x2a\xa7\xb7\x07\x6d\x48\xf6\xaf\x8c\x9e\x21\x68\x69\xb0\x18\x6c\x80\x18\x41\x64\x18\x03\xca\x95\xce\x63\x5d\x44\x67\x9b\xd9\xa0\x3b\xbc\x14\x7d\x2e\xfc\xe7\xc2\x1f\x22\x43\x8d\x2f\x25\xe0\xa4\xff\xf7\x78\x47\x8c\xdd\x0a\x99\xb1\x17\x82\xce\xa4\xf8\x17\x06\xfa\x29\xc8\x2a\x48\x30\x25\xc3\xe2\x0b\x40\x1f\x32\x1c\x82\xad\x14\x3c\x6e\xe8\x62\x22\xc8\xfd\xfc\x1d\xca\x18\x55\x2f\xce\x0e\x2b\x7b\x21\xbc\x52\xc2\x0f\xff\x8f\xe4\xed\x19\x23\x38\x72\x9d\x83\x74\xa2\x05\xde\x13\x9b\x72\x69\x51\xcd\xce\x23\xce\x15\x1e\x1a\x9e\x98\x29\xff\xdb\xb7\xe2\xe7\xf1\xe8\x97\x10\xed\x68\xf9\xfb\x78\xf4\x83\x52\x8b\x1d\xb7\x64\x8e\xc7\x86\xbe\x8c\x48\xe4\xba\x17\x08\x6e\x4f\x37\x94\xe7\x76\xab\xca\x5d\x20\x91\x68\x6c\x50\xbe\x58\x0b\xeb\x53\x12\x49\xd8\xd3\x34\x45\x61\xf4\x10\xfa\x8d\xb9\xbc\x82\x5c\x9b\x74\xa1\xde\x9e\x75\x46\x15\xdb\x4b\x9e\x2d\xbe\x9d\xcf\x4e\x1f\x4d\x3a\xbd\x73\x57\x66\x52\x64\x28\x35\x45\x55\x79\xae\xb0\x19\x86\xcb\xc2\xd0\xd5\x5
4\x89\x80\x48\x49\x0e\xb5\x51\x87\x3a\x16\xcd\x03\xa3\xd2\x50\x13\xb5\x36\x59\x70\x76\x68\x58\xb2\x95\x14\x0b\xa3\x33\xa3\x41\x70\x76\xf0\x60\x66\x93\x41\x44\x27\xc7\x83\x51\xf6\xbc\xb4\x67\xe1\xc9\xbf\x5e\x03\xcd\x57\xb3\x45\xc9\x51\xa3\xea\xd1\x34\x35\xda\xb6\x12\xad\x0d\x1d\x68\x9b\x69\x3d\xc7\xca\xfe\xac\xcd\x53\x8d\x69\x83\xfb\x85\x0c\x69\x19\xe0\xa2\x04\xcd\xc6\x34\x94\x2b\xc2\x63\x6c\xdb\xb2\xa5\xed\xcc\x98\xd3\xe5\x69\xf1\x35\x6b\xbd\xb5\x35\x44\x5a\x09\x6b\x26\xca\x35\x4a\x4e\x18\x90\x30\xb4\xad\x09\x2a\xd0\x09\xd1\x10\x09\xc6\xc4\x4e\xc1\xea\xc3\x18\x7e\xfe\xe5\xa7\x81\xed\x0a\xf3\x58\xf0\x1a\x54\x26\x7b\x92\x66\xd6\x5c\x0f\xef\xfb\xde\xfb\x9f\x06\x5e\xdf\xeb\xfb\x83\xbf\x3c\x78\x6f\x6f\x35\x71\x19\x6b\xb7\x73\x9e\x35\xa3\xb4\x45\xd8\xf2\x2d\xb5\x9f\x7a\xab\x5a\x13\x73\x13\xcc\x94\x04\x09\xe5\xb8\xb1\xc8\x6e\x46\xfa\xb1\x5a\x7c\x0d\x6c\x21\x92\x2b\xb4\xfe\xf9\xfc\x11\xa6\xe5\xa9\x98\x77\x75\xec\x60\xc3\xb9\x0a\x65\xb8\xc7\x88\x18\xa6\x81\xaa\x86\x67\x70\xd0\x4b\x69\x20\xc5\xed\x04\xf7\xe5\x86\x17\x72\xd9\x86\x4b\x8c\xb2\x36\xee\x4e\x9d\x7c\xe6\x97\x9f\x9f\xa2\x5e\xa9\xbd\xce\x7d\x4f\x53\x93\xc2\x23\x61\xa6\x08\xce\x62\x85\x6d\xe4\x89\xd1\x42\x05\x84\x59\xfa\xb1\x14\x26\xab\xd9\xa3\x99\x77\xdf\x95\xea\x29\xd9\x6f\x12\x29\x4c\x9c\x64\x46\xff\x67\x0d\x50\xe9\x7d\xce\x02\xfa\x24\x59\x5e\x9e\x4e\x84\xac\x29\x3e\x6e\x33\x55\x77\x3c\x0c\xfa\xfd\x6e\xc3\x06\x29\xd9\xdb\xf1\xf7\xfd\x7e\xff\x76\x0b\x50\xfe\x87\x44\x40\x4d\xed\x55\xfe\x94\xff\x09\x22\x80\xf2\x3f\x26\x02\xea\x7a\x9f\xb3\xc0\x4d\x11\x40\x78\x68\x51\x37\x6c\xe0\xa2\xe2\x66\xf2\x39\xd4\x9b\x0b\xdb\xbc\xea\x22\x5a\x8c\x36\xb5\x6a\x9b\xab\xb7\xf8\x2d\x21\x67\x36\x78\x38\xb5\x68\xef\x6a\x8d\xd9\xbb\x7a\x37\xf6\xee\xe1\x76\x1e\xa8\x77\x42\x7e\x7d\x05\x15\xb7\xf0\x1a\x1b\xcb\xd4\x5d\x4f\x3e\x2f\xc7\xe5\x2e\xaf\xee\x27\x24\x46\x28\xf1\x3c\xd1\x7a\x27\x7b\xd9\xee\x28\xcd\x8c\x46\xbf\x0d\x0b\x20\xa2\xc8\xc2\x61\xbd\x85\x83\xb2\xed\xb9\x9d\xf7\xb2\xd1\xfb\x5d\xf4\x62\xa1\xfb\xbb\xcf\xd0\x1b\xd8\x33\x61\xc2\xf2\x3
3\x25\x9c\xc4\x28\xfd\x36\xd2\xcb\xa6\x00\x77\xe1\x41\xae\xcf\xf6\x57\x9a\xe8\x57\x04\xf9\xda\x2e\xbb\x34\xed\x96\x9f\xee\x20\xb9\xdc\x84\x9b\xf4\xe6\x46\xf6\x6d\xa3\x93\xcd\xf5\x94\xd9\x6f\xa3\x8d\xb8\x97\x98\xda\xd9\xbf\x14\x4a\xd1\x2d\x43\x57\x30\xcf\xfa\xcf\xf5\x66\xb4\x99\xfc\xf3\xd3\x7c\xbd\x9c\x8c\xa7\x1f\xa6\x93\xfb\x2e\xac\x26\xa3\xfb\x2f\x5d\x18\xaf\x26\xa3\xcd\x74\xfe\x6b\x17\xee\x27\xb3\x89\xfb\x35\x59\xad\x16\xab\x2e\x7c\x5a\xde\xe7\x53\x37\xf7\x6f\x00\xc8\x4d\xda\x74\x64\x0b\x41\x63\x36\x07\xd3\x18\x29\x71\x35\x06\x4b\x88\x8d\xc1\x1c\x6d\x63\xa4\x04\x5e\x79\xd9\x6c\x39\x5e\x48\x84\xc6\x85\xe8\x92\x9b\xf3\x75\x2f\xf1\xf3\xb9\x60\x2b\x61\x1c\x84\xfc\x15\x32\xa1\x41\x62\xef\xea\x89\x70\x4f\xab\xce\xa3\xe7\x6f\x6b\xaf\x28\x24\x97\x2e\x65\x70\xa1\x90\xc3\x53\x37\x92\xe7\x0a\x7a\x8b\x9d\xe3\xee\xca\xf9\x0f\x12\x19\xd1\xf4\x11\xbb\xf9\xd3\x71\x64\x18\x3b\xc0\xef\x86\x30\x1a\x51\x0c\x7f\xf4\x60\xe2\xc5\x1e\xd0\x3c\x8e\xcf\x74\x82\x7b\xf7\x29\xec\xa4\x90\x45\x33\xca\xbf\xda\xb3\x2b\xd1\x3a\x53\x43\xdf\x0f\x5c\xfd\xf3\xe2\xfc\x11\x9c\x64\x54\x79\x81\x48\xcb\x61\xff\xf1\xbd\x5f\xdd\xf0\xcb\x0b\xbe\x2f\x31\x76\xb7\x7b\xf7\xe3\xe8\xbb\x0d\x6c\x01\x55\xfe\x37\xf7\x61\x19\x1e\x5b\x70\xdc\x41\x2b\xa5\x2d\x73\x94\xdb\x5c\x74\xc5\x2e\x7f\x09\x47\x16\xc2\x4e\x18\x16\xc2\x16\xe1\x09\x35\x2f\xf4\x1c\x3c\x53\x08\xe1\xca\x51\xb0\x3e\x91\x69\x5b\xf3\x4a\x19\x84\x6b\xa5\x10\xaa\x23\x63\x1a\xbe\x3e\x5c\x96\xa5\x8a\xa7\x62\xa6\x10\xaa\xa5\x43\x95\x22\xb8\xa7\x4a\x2b\x0f\xa6\x51\x1e\x43\x0a\x75\xd7\x59\x3d\xbb\x50\xec\xc1\xb5\x37\x44\x29\x93\xba\x17\xb0\xad\x4b\xa9\xfa\x01\x55\x6d\x51\x35\x4e\xee\x59\x26\x7f\x05\x41\xd5\xd6\xb9\x23\x0a\xa8\x52\x06\x43\xef\xbf\xe4\xd1\x97\x1e\x6f\xe7\xbe\xfd\x77\x00\x00\x00\xff\xff\x11\xbb\x0c\x0a\x24\x1a\x00\x00"), }, } fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ diff --git a/pkg/servicemapping/embed/assets_vfsdata.go b/pkg/servicemapping/embed/assets_vfsdata.go index ad0ca928f2..f2f5c624be 100644 --- 
a/pkg/servicemapping/embed/assets_vfsdata.go +++ b/pkg/servicemapping/embed/assets_vfsdata.go @@ -69,16 +69,16 @@ var Assets = func() http.FileSystem { "/compute.yaml": &vfsgen۰CompressedFileInfo{ name: "compute.yaml", modTime: time.Time{}, - uncompressedSize: 96415, + uncompressedSize: 98460, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x7d\x73\xdb\x38\xd2\xe7\xff\xf9\x14\xa8\xb8\xea\x32\xf3\x94\x23\x4f\x32\xfb\xf2\x6c\x9e\xba\xaa\x53\x64\xc7\xd1\x8d\x63\xab\x2c\x27\x7b\x5b\x57\x57\x1c\x88\x84\x24\xac\x29\x80\x0b\x80\xf2\x68\x73\xf9\xee\x4f\xe1\x8d\x04\xf8\x22\x91\x8a\x64\xd1\x8e\x5c\x35\x13\x5b\x22\x40\xa0\xbb\xd1\xdd\x00\xba\x7f\x7d\x02\x06\x34\x59\x31\x3c\x9b\x0b\xf0\xf6\x97\xb7\x6f\xc1\x25\xa5\xb3\x18\x81\xab\xab\xc1\x8b\x93\x17\x27\xe0\x0a\x87\x88\x70\x14\x81\x94\x44\x88\x01\x31\x47\xa0\x9f\xc0\x70\x8e\xec\x37\xa7\xe0\x0b\x62\x1c\x53\x02\xde\xf6\x7e\x01\x3f\xc9\x07\x5e\x9a\xaf\x5e\xfe\xfc\x5f\x2f\x4e\xc0\x8a\xa6\x60\x01\x57\x80\x50\x01\x52\x8e\x80\x98\x63\x0e\xa6\x38\x46\x00\xfd\x11\xa2\x44\x00\x4c\x40\x48\x17\x49\x8c\x21\x09\x11\x78\xc0\x62\xae\x5e\x63\x3a\xe9\xbd\x38\x01\xff\x30\x5d\xd0\x89\x80\x98\x00\x08\x42\x9a\xac\x00\x9d\xba\xcf\x01\x28\xd4\x80\xd5\xcf\x5c\x88\xe4\xdd\xd9\xd9\xc3\xc3\x43\x0f\xaa\xd1\xf6\x28\x9b\x9d\xc5\xfa\x49\x7e\x76\x35\x1c\x5c\x5c\x8f\x2f\x5e\xbf\xed\xfd\xa2\xda\x7c\x26\x31\xe2\x1c\x30\xf4\xaf\x14\x33\x14\x81\xc9\x0a\xc0\x24\x89\x71\x08\x27\x31\x02\x31\x7c\x00\x94\x01\x38\x63\x08\x45\x40\x50\x39\xe0\x07\x86\x05\x26\xb3\x53\xc0\xe9\x54\x3c\x40\x86\x5e\x9c\x80\x08\x73\xc1\xf0\x24\x15\x1e\xb5\xec\xf0\x30\xf7\x1e\xa0\x04\x40\x02\x5e\xf6\xc7\x60\x38\x7e\x09\xde\xf7\xc7\xc3\xf1\xe9\x8b\x13\xf0\xf7\xe1\xdd\xc7\x9b\xcf\x77\xe0\xef\xfd\xdb\xdb\xfe\xf5\xdd\xf0\x62\x0c\x6e\x6e\xc1\xe0\xe6\xfa\x7c\x78\x37\xbc\xb9\x1e\x83\x9b\x0f\xa0\x7f\xfd\x0f\xf0\xdb\xf0\xfa\xfc\x14\x20\x2c\xe6\x88\x01\xf4\x47\xc2\xe4\xf8\x29\x03\x58\xd2\x11\x45\x92\x68\x63\x84\xbc\x01\x4c\xa9\x1e\x10\x4f\x50\x88\xa7\x38\x04\x31\x24\xb3\x14\xce
\x10\x98\xd1\x25\x62\x04\x93\x19\x48\x10\x5b\x60\x2e\xb9\xc9\x01\x24\xd1\x8b\x13\x10\xe3\x05\x16\x50\xa8\x4f\x4a\x93\xea\xbd\x78\x01\x13\x6c\xf8\xff\x0e\x84\x94\xa1\x5e\x48\xd8\xa2\x17\xc6\x34\x8d\x7a\x33\x25\x4a\xbd\x90\x2e\xce\x96\x6f\x60\x9c\xcc\xe1\x9b\x17\xf7\x98\x44\xef\xc0\x18\xb1\x25\x0e\xd1\x27\x98\x24\x98\xcc\x5e\x2c\x90\x80\x11\x14\xf0\xdd\x0b\x00\x08\x5c\xa0\x77\x4a\x20\x52\x51\xd3\x9b\x79\x8a\x27\x30\x94\x8f\x12\xb6\x78\xcd\x57\x5c\xa0\xc5\x0b\x39\xb7\xbc\x93\x81\xee\xe4\x05\x00\x4b\x3b\xc4\xe5\x9b\x09\x12\xf0\xcd\x0b\x00\xb8\x1e\xc2\x47\xca\xc5\xb5\x7a\xfa\xa5\x7d\xa7\x7e\x11\x4c\x30\x97\x2f\x7b\xf9\x02\x00\x86\x38\x4d\x59\x88\xb8\xec\x1b\x80\xd7\xa6\x7f\xfd\x60\x60\xda\x05\x30\x8a\x24\x1b\x5e\x68\x11\xd4\x13\x35\x43\xe8\x7b\x5f\xd9\xe9\x9a\xe9\xbf\x33\x1f\xdb\x61\xcb\xff\x67\x1f\xc5\x70\x82\x62\xfe\xce\xfc\x6b\x3e\xb6\xe3\x19\x9e\xe7\x6d\x05\x64\x33\x24\x3e\x60\x14\x47\x5e\x17\x38\xba\x43\x8b\x24\x86\x42\x4e\x31\x61\xf4\x9f\x28\x14\xfc\xec\xeb\x57\xf3\xeb\xb7\x6f\x67\x0c\xcd\x24\x7b\xcf\xbe\x7e\xd5\xbf\x7d\xfb\x76\x66\xa6\x82\xe4\x87\xb2\xaf\x6f\xdf\x5e\x96\xba\x1b\x40\xf2\x1e\x7d\xe6\x28\xba\xa3\x9f\xa0\x08\xe7\xb7\x66\x50\x9a\x9a\x82\xa5\xa8\x30\xda\xfe\x12\xe2\x58\xae\xa8\x21\xe9\x73\x8e\xc4\x90\x2c\x11\x11\x94\xad\xbc\xa7\x63\x1a\x2a\x71\x83\x31\x16\xab\x77\x40\x0f\x09\xc6\x85\xbe\x6e\xd1\x14\x31\x44\x32\x96\x68\xb6\xdc\xa3\xd5\x3b\x40\x90\x78\xa0\xec\xfe\x16\x4d\xb3\x6f\x00\x10\x53\x4b\x19\xfd\xad\xf3\x55\x84\x78\xc8\x70\x22\x94\x7c\xfc\xff\xd7\xce\x37\x00\xdc\xcd\x91\x6d\xa1\x56\xfd\x1c\x87\x73\xa9\x01\x18\x92\xe2\xa3\xd7\x97\xa1\x55\x0f\x0c\xa7\x60\x16\xd3\x09\x8c\x4f\xdd\xcf\xbd\xfe\x16\x29\x17\x60\xa2\x95\x1c\x26\xea\xb1\xdb\x0f\x83\x37\x7f\x7b\xf3\x9f\x60\x38\x02\x4a\xa0\x7b\xde\x4b\x43\x48\xa4\xde\x9c\x20\x10\xa1\x18\x09\x14\x79\xdd\x61\xa5\x01\x19\x02\x50\xfe\x47\x56\x76\x5c\x91\xec\x8d\x41\x32\x43\x52\xa9\x4d\x11\x63\x72\x71\x4b\xcd\x25\x64\xf7\x4a\xff\xa2\x38\x92\xbd\x7b\xfd\x51\x12\xaf\xe4\xbb\x52\xa9\xf0\x95\x22\x1e\x5e\xdf\x5d\xdc\x5e
\xf7\xaf\x80\x58\x25\x8e\x6e\xfe\x32\x1a\x04\xa3\x8b\x8b\xdb\xe1\xf5\xa5\xd2\x13\x6e\x27\xc3\xd1\xf8\x62\x10\xa8\x86\x83\x9b\xeb\xeb\x8b\xc1\x1d\x48\x52\x96\x50\x8e\x78\xcf\x79\x70\xb6\xbc\x7f\xe7\xb5\xf3\x16\xcc\x75\x89\x4d\x95\x8b\xd8\xe9\x8e\xd1\x34\xd9\xac\x37\x2a\x57\x0b\x47\xf1\x34\x88\x31\xb9\x2f\x8a\x12\x4f\x27\xeb\xa5\x29\x7f\xa0\x85\x40\xe5\x8d\x1a\xc9\x14\x24\x92\x9b\xe6\x13\x80\x7d\x81\x32\xca\x1c\x45\xa7\x00\x8b\x2a\xf1\xca\x5f\xf6\x8a\x67\x52\xd1\x03\x8e\x14\x78\xfd\x85\x90\x34\x93\x82\xcb\xc1\x45\x70\x71\x7d\x3e\xba\x19\x5e\xdf\x9d\x9d\x5f\x8f\x83\xdb\x8b\xf1\xcd\xd5\x97\x8b\x5b\xaf\xbb\xf6\x8c\x1f\x57\x51\xf4\x11\x79\x1f\x52\x22\x7d\x0c\xc4\x3c\xc5\x22\xe7\xfd\x0e\x18\x95\x59\x25\x08\xee\x57\x35\xe6\x61\x02\xc3\x7b\x44\xa2\x60\x92\x86\xf7\x48\x54\x59\x89\xf7\xfa\x89\xf7\xee\x03\xcd\x6d\xc5\x6e\x8d\x82\xd6\x64\x67\x13\x77\x48\x07\xb0\x06\xf5\xfa\xde\x2c\x51\x4d\x4d\x77\x79\x66\x3c\xd1\x5f\x05\x9e\x39\xad\x5f\x9b\xd9\x1b\xe4\x5a\x94\x2b\x47\x37\xcf\x25\xb7\x20\xb7\xc6\x91\x11\x94\xc1\x19\xf2\x58\xb6\x41\x5e\x8d\xb4\x72\xdd\x72\xad\xb4\xee\x5f\x18\x8d\x17\xb4\x46\x1a\xc7\xde\x13\xfb\x11\xc7\x82\xcd\xd7\xb2\xb7\xa5\xa8\x9a\xf1\x76\x4a\x56\x15\xd7\x2c\x77\xe6\x08\xc6\x62\x1e\x84\x73\x14\xde\xf3\x16\x66\x43\xb7\x03\xaa\x5d\xee\x91\x2a\xb7\xde\xfd\x4a\x59\xfa\x79\xc1\x4c\x54\xb2\xb4\x07\x06\x29\x63\x88\x88\x78\x05\xa0\x00\x0b\xca\x05\xa0\xa4\xf0\x22\x69\x10\x26\xa8\xce\xe6\x40\x12\x01\xe8\x37\xc0\xf9\x3e\xca\xd5\xfa\x52\x68\xb9\xaf\xf7\xcd\x0a\xd6\xad\x07\xb2\xb1\x6f\x65\x41\x95\xb1\x28\x89\xe9\xc7\xbc\x79\xe9\xc1\xb5\x56\x03\xb4\xb7\x1c\x60\xb3\xe7\xe0\x4e\x4c\x88\xe4\xe3\x77\x4e\xee\xee\x6e\xd4\xb1\x09\x3a\x92\x6c\x56\x5c\x4f\xbd\xa4\xa1\x24\x7b\x6a\x16\xda\x79\x0e\x09\x17\x72\xf7\x7f\x29\x7b\x92\xfb\x58\xdf\x0d\xbc\x20\x51\x42\x31\x11\x97\x85\x17\xe5\x6b\xae\x07\x86\x04\x84\x90\x23\x40\xa7\x00\x9b\xde\xf4\xec\xf5\x81\x43\x84\xa6\x98\x20\xae\x34\x7b\x8c\xa5\xa4\xfb\xdc\xb0\x6d\xe4\x13\x50\x00\xe3\x8a\x31\x38\x9d\xe2\xb0\x07\x3e\xa1\xc5\x04\x31\xb0\xc4\x4c
\xa4\x30\x06\x0b\x18\xce\x31\x41\x4e\xa3\x29\x2b\xd0\x11\xc1\x70\x5e\x1c\x88\x72\xcf\x62\xbc\x94\x0d\xb5\x73\x06\x17\x08\xfc\x5b\xae\x39\xa8\x47\x66\x1b\x94\xdd\x1b\x80\x85\xe4\x45\x0f\x5c\x53\x20\x1e\xa8\xa5\x3d\x07\xea\x3c\xc4\xfc\x65\x37\xb5\x7a\x3f\x10\xc7\xf4\x41\x1d\x58\x78\xbd\xa5\xdc\xbc\xd7\xd2\x1c\x68\xa2\x67\x94\x7c\xe1\x3d\xfe\x81\x32\x60\xb8\x00\x2c\x1b\x74\x0b\xee\x13\xd6\x10\x15\x20\xf3\x10\xef\x81\x7e\x1c\xfb\x34\xb1\x5f\xc9\xe7\xaa\x3b\xcd\x5c\xd8\x39\xe5\xe6\x98\x24\x23\xb2\xd7\x97\x32\x19\x28\xaa\xa5\x64\x75\xf7\x85\xc9\xbd\xf7\xc9\xc6\xed\x76\x6b\x81\xff\x28\x92\x47\x6a\xba\x62\x9f\x5e\x5f\xfa\x31\xcb\x96\xc2\x8b\xfe\x3e\x47\x7a\x9c\xaf\x62\x0a\xa3\x60\x02\x63\x48\x42\x4c\x66\x01\x0f\xe7\x68\x81\x5e\x49\xad\x69\x9d\xec\x53\xed\x80\xfb\xb2\xe3\x4f\x5e\xb2\x97\xa7\x49\x42\x99\x68\xaa\x65\xb1\xbb\xc2\xb6\x51\x45\xde\x12\xed\x82\x22\x02\xa5\x2d\xbf\xa7\x26\xb6\x99\xe3\x46\x75\x73\xa0\xa9\x3a\x3a\x97\xa3\x30\x65\x58\xac\x82\x84\xc6\x38\x5c\xb5\xd9\x76\x9a\x96\x40\xb7\x04\x90\x73\x1a\x62\xb5\x88\xcc\xbe\x1e\xf3\xa2\x22\x71\x65\x4b\xef\x88\x4d\x27\x23\xd5\x87\x4f\xe3\x0d\x1b\x3b\xaf\xe5\xe1\x36\xf6\x65\x4a\x72\x24\x04\x26\x33\xde\x0b\x63\x8c\x88\x08\x44\xcc\xdb\x51\x77\xa0\xda\xdd\xc5\x5c\xcf\x4d\x2e\x66\x98\xe9\x53\x63\x4f\x8c\xcf\xc4\xc1\x9c\x3e\x00\x08\xf4\xab\x00\x9f\xd3\xb4\xb0\x03\x87\xa9\x98\x23\x22\xb0\x54\x6f\x72\x03\x40\x50\xa8\x4f\x5f\x85\xa3\xf4\xe9\x14\xc0\x8c\x47\x7a\x2f\x9f\xbd\x4f\x1b\x0a\xaf\xcf\x88\x22\xae\xce\xdf\xe1\x74\x8a\x42\x21\xbb\x9d\xe2\x59\xca\x94\xd3\x0d\x52\x7d\xfa\x8d\x85\x1a\xb8\x10\x30\x9c\xeb\x73\xee\xcc\xae\xf8\xee\x9f\xb1\x31\xb9\xe9\xfd\x8f\x81\xea\x6f\xa0\x07\x4b\x99\x56\x60\x46\x41\x71\xf0\x3b\xfa\x43\x20\x46\x60\xfc\x7b\xc1\x74\x5b\x37\xd9\x1c\x4d\xdb\x03\x89\xde\x7f\x14\xa5\x4e\x53\xeb\xee\x6a\xdc\x42\xec\xcc\x5a\xb6\x62\x37\xf0\xbb\x68\x2d\x7f\x46\xc5\x58\xa1\xa9\x3d\xe6\xf6\x3b\x70\xc4\x0d\xc3\xa4\x47\x25\x73\xdf\x06\x46\xce\x70\xd4\x50\xbe\x6e\x24\x39\x73\x2a\x9a\xc3\x3b\xcc\x73\x1b\x20\xb9\x65\x99\xaa\x4f\x90
\x32\xea\x16\x2c\xd1\x4d\x5f\x0e\xc1\x88\x2c\x18\x9e\x2b\xda\x0f\xfb\xa3\xd2\x4a\xd7\x63\xd5\xcf\x0d\xa3\x46\x24\x1f\xf6\x47\xc3\x48\xca\xae\x58\xf5\x1f\x20\x43\x23\x46\xff\x30\x84\x6f\x4d\x6f\x49\xae\x56\x6b\xdd\xd9\x4e\xee\x60\xdb\x0c\xc0\x22\x15\x72\xc7\xf7\x3e\x15\x9f\x09\x43\x30\x92\x7f\xa8\x07\xbd\x4e\xcb\x5c\xe5\x28\x64\x68\xed\xde\x3b\xc2\xfc\xbe\x6a\xc3\x7d\x9e\x7f\xde\xb1\x1b\x02\xe9\x58\xc9\x4f\xe4\xbf\xdf\xbe\x9d\xc9\x09\x1c\xfe\x66\xe0\xdf\xce\xb5\x00\x86\x0b\xad\x83\xf2\xd9\x6a\x0d\x7e\x5d\xc7\x80\x00\xc3\x45\x51\xcb\xeb\x3f\xb5\xaf\xbf\xbe\xe1\x42\x3d\x93\x35\xcc\x16\x9b\x26\xae\x23\x5c\x15\xfc\x02\x46\x12\xbd\x0f\xad\xb2\x1c\x50\x12\x61\xa5\xef\xdf\x81\x29\x8c\x79\xcb\xb3\x85\xb2\x74\xab\xb5\x6c\x3e\xf6\x17\xf1\x26\x7f\xc1\x34\xd2\xf6\x4b\x78\x26\x66\x82\x62\x4a\x66\xd2\x24\x35\x38\xd8\x1d\x95\xc6\xd4\x4c\x03\xd8\xb7\x2d\x20\x81\x33\xc4\x36\x68\x03\x57\xd3\x2e\xe0\x0c\xb5\x98\xa8\x7a\x5e\xed\xe0\xf2\xc3\x78\x4c\xb0\xc0\x30\xc6\xff\x36\xf7\xd5\x92\xf3\x25\x15\xa9\x1a\x16\x6e\x06\x36\xf9\xac\xeb\x9d\xa4\x61\x61\xe8\xbb\xf4\x8d\x5c\xd7\x87\xc0\x84\xcf\xa9\x68\xe3\x3d\x6a\xce\xdb\x96\xfa\x72\x40\x5a\x1d\x86\xa4\xa3\x52\x4f\x24\xdb\x62\xa7\x74\x1a\x97\x27\xb0\x27\x52\xa9\x25\x8f\x48\xc8\x56\x8a\x36\xc1\x3d\x5a\xf5\xee\x17\x5c\xfe\x1b\x54\x8d\x7b\x13\x1d\xf3\xae\x24\x79\x32\x32\x9a\x8f\x95\xe9\x56\x64\x04\xff\xa0\x29\xb3\x6b\xf0\x15\x77\xee\xb2\xf3\x9f\x0b\x32\xc3\x04\x81\xb1\xba\xfb\xce\xf7\xfe\x61\x48\xd3\x82\xc5\xfd\xe9\x95\xf9\xf6\xf5\xd7\xaf\xa3\xdb\x9b\xff\x7d\x31\xb8\x0b\xae\x3f\x7f\x7a\x7f\x71\xfb\xed\xdb\xff\x32\x34\x31\x77\xe8\x3d\x0c\x17\xbd\x99\x79\xdc\xf4\x25\x89\xf3\xea\x67\xaf\x47\xb5\x4f\x9f\xc3\x25\x02\xaf\x18\x8d\x11\x3f\x53\xc4\xbc\x5f\xf0\x9e\x9a\x08\xfd\x0d\xad\x2e\xf4\x9c\x10\x3b\x47\xe6\x97\x57\x72\xaa\x36\x06\xc4\xeb\x6e\x8a\xa0\x48\x19\xea\x81\x31\xf2\xe7\x38\x17\x22\xe1\xef\xce\xce\x4a\xae\x96\x19\xf5\x59\x44\x43\x6e\x2c\x52\x98\x72\x41\x17\x88\xbd\xd6\x2a\x23\x7a\x9d\x13\xfb\xc4\xfc\x1a\xc0\x80\xa0\x87\x20\x91\xa2\xc2\x85\xb4\xd7
\x8a\xc1\x72\x03\x14\xac\x68\xca\x02\xfa\xa0\x78\xcc\x8b\x62\x7c\xbf\xe0\xbf\xa1\xd5\x6e\x84\xf8\xb7\x4f\xe3\x81\x25\x52\x6b\x09\x56\x24\xde\x8d\xf4\x2a\x16\x07\x65\x79\x59\x27\xc3\x76\x2f\xe9\xc9\x9a\x16\x63\x1b\x60\xe2\x88\x38\x43\xff\x4a\x11\x17\xd9\x57\x33\xbc\x44\x44\xce\x5f\x12\xb5\x57\xe8\x78\x38\x05\x70\xc2\x11\x11\xfa\xfe\xdc\x48\x7c\x26\xe5\xe6\x95\xfd\x99\xda\x38\x15\x06\x80\xb9\x1a\x43\x49\xfb\x68\xb6\x99\xb6\x7d\xfd\x70\x43\x5f\xf6\x93\xdf\x6a\x0b\x0f\x76\xd1\xce\x83\x45\x0b\x88\xe3\x4a\x75\xad\x74\x6f\xa0\xcc\xcd\x51\x17\x1d\x75\xd1\xd3\xd4\x45\xcd\xa4\x78\x1b\x9d\xb4\x27\x8d\x74\x18\x7d\xb4\x46\x25\x1c\x54\x59\x95\x39\x69\x3d\xbb\xa3\x4a\x3a\xaa\xa4\x27\xad\x92\x36\x0b\xf2\x51\x2b\x75\x5f\x2b\xd9\xf3\x02\x7d\xaa\x83\x51\xd3\xd8\x0a\x7b\x3a\x05\x6c\x3b\x1d\xfb\xad\x15\x4f\xb6\xaf\x55\xac\x81\xa9\xa0\x0b\x28\x70\x98\xef\x83\xd5\xfe\x17\x53\xd2\x22\xc4\xcd\xbe\xf0\x90\x37\x21\x3b\x39\x22\x9d\x63\xc4\x20\x0b\xe7\x38\x84\x71\xcd\xc1\x54\x4d\x87\x55\xa7\x52\x35\xa7\xa5\xf6\xd8\x3b\x58\x26\x24\x98\x41\x81\x1e\xa0\xa5\x99\x47\xd5\x0b\xf3\xdc\x97\xd1\xf5\xa5\xf7\xd4\x81\x23\xe8\xec\xf8\xbf\x24\xc4\x8c\xeb\x00\x47\xa7\xfb\x0b\x24\x9b\x62\x86\x1e\x60\x76\xd1\xef\xb1\xe4\x83\xff\xdd\x81\x19\x61\x47\xda\xdd\xc8\xb0\xed\x63\xd3\x05\x35\x17\x77\xf6\x1a\x4d\x4f\xb5\x70\x40\x5b\x1f\x1e\xdf\xf1\x60\xec\x92\xc1\xf6\x0d\x72\x53\x4d\x3f\x9c\x66\xc7\x98\xbe\x61\xe4\x3a\xa2\x22\x8f\x6b\x93\x96\x35\x23\xe2\x03\x8e\x63\x65\x13\x56\xea\x62\xd3\xeb\x52\x9a\x08\x1d\x24\x04\x28\xc3\x33\x4c\xa0\xc0\x64\xa6\x0f\x93\x61\x1e\xbb\xa2\x6f\xd8\x61\xd9\x20\x93\xb2\xaf\x17\x63\x2e\x7a\x60\x5c\x33\xd0\x3c\x21\x20\x3b\x81\xa5\x44\x30\x1a\x67\xe3\x90\xb2\xe0\x87\xf5\xdb\x51\xbc\xe2\xc0\x6a\x23\x37\xa4\x7c\x82\x42\xa8\xa2\x82\xaa\x88\x92\x47\x09\x78\x5d\xea\xf9\xe4\x13\x3c\xd5\xd7\xcb\x6e\xa8\x7a\xcf\x10\xfb\x56\xe7\x21\xe8\x40\x42\xc0\x91\x00\xea\x3e\xa1\x10\x53\x08\x17\x08\x08\xbc\x50\xf1\x3b\xba\x9d\xef\x3b\xe8\x58\xf8\x09\x95\xaf\x65\xaa\x9b\x0a\x2e\x79\x5d\x6a\x8e\x39\x0c\x52\x17\x19\xf3
\xac\x7f\x97\x04\x26\x60\xde\x1b\xf0\xcd\x6d\x79\x94\x59\xc3\xfc\x06\xa4\xcc\xe6\x22\x93\x25\x43\x55\xa4\x52\x45\x5f\xfe\x24\x75\xee\x47\x7e\xe9\x9f\x5f\xdc\x13\x93\x7a\xb6\x90\x5a\x49\x91\xc1\xeb\x2c\x61\x34\x41\x4c\xe0\xec\x2e\x1d\xb9\x3a\x40\x93\xa2\x57\x4d\xd7\x5c\xa2\xbc\x1e\x95\x74\x69\x46\x55\x31\xe7\x0e\xce\x54\xbe\x99\x5e\xba\xf2\xaf\x06\xfe\x4f\x97\xce\xd4\xf4\x53\xdb\x6a\x92\x7e\x16\xee\x56\x5a\x34\x98\x44\x38\xd4\x5a\x80\x23\x1d\xea\x96\x87\x09\xfa\x51\x6b\x5e\x97\x99\x2a\x97\x52\xba\x80\x2b\xb0\x80\xf7\x4e\x1e\x90\x13\x08\x22\x99\x60\x55\x95\x0a\xff\xd3\x81\x7e\xff\xf7\xff\xf9\x1b\x05\x3d\xc3\x5a\x6e\xd7\x73\x38\xe7\x29\xa0\xac\x42\x66\x15\xb7\xe5\x6a\x24\x26\xff\xb0\xfa\x4d\xc4\x13\x0f\xb9\x6a\xfd\xbe\x6a\xb4\x2d\x4b\x63\x64\x3c\x70\xbd\xbc\xe2\xd8\x21\x20\x25\x6e\x0e\x63\x41\x29\x19\x62\x75\x5f\x12\xf7\xe8\x8e\x51\xf6\x00\x59\x84\xc9\x2c\x90\x84\xac\xf4\xca\xb2\x47\x6e\xf3\x27\x1e\x35\xe0\x60\x7d\x8a\xdf\x56\x09\x8b\x53\x6f\x52\xdd\x75\xf1\x34\x49\x5a\x78\x78\xba\x81\x13\x4d\x46\x01\x43\x21\xc2\x26\x51\x4c\xd9\x03\x69\x1a\x6c\xa4\xb2\x6c\x63\x88\x51\x58\x1d\xd6\x0c\xda\x08\x5b\x15\x43\xa6\x12\xba\x60\x22\xed\x07\xc3\xea\x22\x57\x27\xbf\x98\xd7\xd2\x89\x24\x79\x0f\x7c\x28\x28\x02\x1b\xab\x1a\x8c\x2f\xae\x3e\x04\x9f\xfa\xd7\xfd\xcb\x8b\x73\x10\x53\x18\x81\x2c\xb6\xd5\xc4\xb1\x7e\xbc\xbb\x1b\xa9\xe8\x59\xf9\xcb\xd8\x74\x5c\x0e\x67\x5d\xc2\x18\x37\x0c\x65\xd5\x5d\xe4\xfb\xbc\x6d\x22\x3d\xef\x0a\x7d\x94\x9e\x3e\x6c\x40\xab\x7e\x52\x52\x4c\xc5\x50\x6d\x3f\xc3\xac\x8b\xce\x4e\x70\xbc\x83\x19\x8e\xbb\x3b\xc5\xbb\xc1\xf7\xb2\xd0\xf6\xd0\xc9\xe9\x8d\xc7\x57\xdf\x39\x3d\xdb\x43\x27\xa7\x77\x79\x3b\x1a\x7c\xe7\xfc\xb2\x2e\xba\x30\x41\x37\x4a\x2a\x09\xca\x19\xea\x9b\xac\x91\xb3\x65\xca\xc3\xc2\x72\xdb\xab\x9d\x37\xcc\xb5\x57\x4c\x66\xd2\x5d\x9b\xa0\x39\x8c\xa7\x80\x4e\x0b\x81\xa8\x7d\x0b\x31\xa0\xd4\x3f\x43\x5c\x30\x1c\x4a\xdf\x78\x02\xb9\x4e\xfc\x10\xb9\x29\xb3\x7d\xbf\xe2\xca\xc8\x78\x1d\x65\x06\x07\xe8\x64\x0a\xf0\xd3\xc5\xff\x31\xe9\xca\x94\x65\x96\xea\x67\x65
\x83\x78\x48\x13\x04\x7e\xd2\x27\x42\x45\xef\xd6\xfa\x21\x3f\xd7\x25\x6f\xf8\xf6\xcd\xbe\x0e\x73\x60\x5f\x78\xaa\x76\x5f\xa6\xf7\x7c\xe8\xfe\x5b\xa4\x7f\xe2\x21\x04\x64\x16\x19\xda\xa6\xc3\x91\x4e\xad\x93\xbd\xd9\x51\xb5\xed\xaf\x94\x67\xa4\x3b\xb2\xf9\x31\x6b\x7a\x53\xbe\x7d\x1e\x8e\x2d\x27\x88\x16\x89\x58\xc9\x31\x01\x94\xc8\x49\x33\x35\xc8\xe5\x9f\xb2\x57\xaa\x03\x8f\xca\x4d\x7d\x91\xe4\x39\x99\xf5\xc1\xca\x44\x9d\x31\xe0\x19\x41\x51\x0f\xf4\x8b\xce\xa0\xfe\x29\x4a\x58\x16\x5a\xae\xc6\x20\xdd\x0c\xd9\xb4\x44\x77\xe0\xb8\xc0\x66\x44\xb6\xa1\xd9\xb9\xe8\xf6\xea\xdf\xbf\xb4\xe6\x7a\x9e\xb0\xa3\x88\xe5\xa6\xce\x43\x02\x6e\x3f\x0c\xbc\xfe\x2c\xc8\x43\x7e\xea\x12\x53\x32\x33\xb8\x0c\x22\x3f\xcb\x3b\xd3\x19\xfb\x79\x38\x77\x76\x61\xb4\x8e\x24\x3d\xf0\x7e\x05\x22\x34\x85\x69\x2c\x4e\x35\x2e\xc4\x06\xfe\x15\x4e\x88\xca\xc7\x42\x19\x77\xec\xa5\x07\x8c\xe3\x95\xda\x6c\xea\x3d\xac\x65\x39\x18\x8e\x7c\x11\x82\x64\x86\x2c\x38\x8f\x99\x0c\x65\xee\x4e\xd6\x9f\x57\x59\x83\x34\xcc\x68\x2a\xa6\xe6\xfc\x93\x53\x32\x96\xdc\x81\x77\x6a\x27\x25\x35\x4a\x41\xbc\x4d\x53\x33\xc5\x6d\x94\x7a\xbf\x02\xd5\x03\x3c\x86\x32\x2f\x2a\xeb\x75\x68\x27\xeb\xd5\xb8\x2b\x18\x84\x3a\x97\x92\xd9\xe1\xa0\x2f\xf2\xca\xfd\xcf\x44\xc4\x3f\xab\x2c\x78\xfc\xae\xd4\xa9\x4c\x00\x95\xf2\x22\xdc\x93\xea\xb9\x39\x72\xa8\xd1\xe3\x1a\xbb\x44\xe7\xc5\x98\x35\x22\x17\x48\xad\xa8\x58\x5d\xe5\x2f\x0e\x77\x7a\x85\x83\x06\xb3\x48\xb2\x01\x59\x31\x2f\x5e\x8c\xae\x39\x95\xef\xf8\x69\xf9\x5a\xe8\x92\x16\xe0\x24\x1e\xab\x9a\xb1\x67\xbd\x8a\xf2\x45\x4f\xe9\xca\x4c\xf6\x32\x0d\x54\x6f\xdf\x0b\x0a\x7a\x38\xf5\x04\xcb\x39\x12\xd3\x49\xb1\xa9\xa0\x56\xfd\x2c\x68\x84\x7c\xe1\xf4\x47\x4a\x13\x7d\x1c\xd1\x03\x1f\xe9\x03\x5a\x22\x66\x54\xa8\x03\xfe\xa3\xfa\xd4\x11\x14\x7e\xaf\xb0\x60\x60\x32\xf2\x59\xab\x9e\x0d\xac\x52\xba\x2a\x11\x64\x0e\x0c\x91\x62\xa1\x35\x3c\xb4\x80\x1a\x04\x9c\x6a\x20\x89\x4d\x92\xd6\xaf\x46\x24\x68\x70\xba\x81\x4b\x10\x38\xa9\x76\x17\xe3\x55\x59\x8e\x5c\xd9\x69\x4a\xdd\x4a\xd8\x8b\x5d\x53\xf8\x60\x20\x34\xda\x47
\x7a\xb2\x50\x65\xe6\x2a\xb7\x3b\xf0\x64\x1e\x54\xc9\x11\x9c\xcc\xed\xee\x08\x4e\x96\x77\xf7\x94\x74\xc3\x53\xbc\x42\xd8\x0e\x31\xe8\xa9\x5c\x1a\x1c\x8f\x6a\x8e\x47\x35\xc7\xa3\x9a\xe3\x51\x8d\xfa\x79\x8a\x47\x35\xcf\xe1\xbc\x05\x6c\x7f\xf2\x74\xbc\xff\xdd\xc3\xfd\xef\xf1\x72\xb4\xe1\x0c\x8f\x97\xa3\xc7\xcb\xd1\xe3\xe5\x68\x83\x09\xee\x65\x8b\x7e\x3c\x6c\x6f\x72\xd8\xfe\x43\xee\xb7\xe7\x70\x53\x4e\x4b\x67\x72\x59\x2a\x22\xee\x96\x87\xcc\x67\x39\xe0\x49\x9b\x5a\x18\x5f\x46\xd7\xc0\xf0\x4d\x81\xa6\x85\x21\x4a\x54\xc8\xaf\x75\xb2\xa6\x94\x3d\x11\xf9\x76\x10\x22\x05\x62\x26\xce\xb8\xaf\x92\x49\x16\xa8\x94\x06\x68\xe9\x27\x05\x57\x35\x98\xc2\x10\xf1\x9e\xdb\x36\x80\x59\xe3\x86\xe4\x1d\x2e\x0c\xd8\x56\xcf\x6e\xe5\x30\x97\x0e\x61\xaa\xf6\x6e\x09\x43\x79\xba\xa3\x24\xbc\x11\x3b\x4f\x99\x14\x37\x5c\x60\x38\xe2\x28\xb4\x19\xae\x28\x02\x03\x49\x1e\x30\x74\xc6\xf9\x5f\x2a\xb4\xf8\x62\x66\x4b\xa7\x0c\xc9\xac\xe4\xf0\x3b\xec\xcc\x19\x6f\xdf\x9f\xcd\x5f\x8f\x64\x26\x3d\x67\x46\xd3\xd9\x7c\x4d\x88\xb2\x4b\x28\x90\x13\xca\x01\xd3\xbb\xa6\x02\x84\x39\x2e\xb3\x5d\x0e\x20\x49\x27\x31\x0e\xe3\x55\x73\xb1\x1a\x56\x32\xf4\xd9\x69\x51\x07\x4e\xbb\x4a\x87\x96\xe1\x93\x3b\x8c\x61\xee\x60\x61\x3f\xab\x2c\xc1\xb9\x10\xc9\x66\x4e\x55\x82\x5d\x1f\x38\x6b\xb0\x00\xe2\xfd\xec\xb8\xc2\x1b\xb1\x65\xdc\x49\xbe\xf0\xe7\xca\x18\x17\xc1\xae\x16\x1f\xee\x71\xaf\x66\x5b\x01\x2c\x6a\x8c\x97\x6d\x10\x16\xf3\x96\x2d\x21\x16\x8b\xa0\x7f\x0d\x31\x16\xdb\xf9\x94\x11\xe6\xb5\x95\x80\x74\x66\xaa\x83\xed\x09\x9a\xa3\xea\x29\x80\x81\x02\x98\x9e\x46\x25\xb4\x37\x2c\x7e\x6a\x97\xaa\x87\x97\x72\x21\x65\x69\x89\x23\x64\xcf\xc8\x55\x43\x93\x0b\xb8\x02\x15\x47\xd1\x0c\x3e\x9c\x63\x7e\xdf\xb3\x88\x07\xf6\xd1\x49\x2a\xd4\xf6\x4d\xe5\x57\xe6\x03\x81\x44\x0f\xa3\xb9\xcf\x71\xee\xcf\xff\xc0\x79\xc2\x45\xa1\xd0\x91\x51\x5a\xe8\xcb\xd8\x8d\x0d\x99\xa5\x19\x53\x85\x7f\xd8\x92\x56\xfb\x04\x7b\xdc\x26\xa9\xba\x8c\xa9\xe8\xd0\x6b\x5c\x09\xe3\xb8\x0b\xd4\xc8\x96\x54\xdb\x33\xf4\xe3\x06\xc2\x35\xb6\x4f\x6a\x56\xcf\xcb\x28\xf9\xb5\x29
\x2a\x8b\x03\x3c\x01\xd3\x64\x46\xba\x9d\x75\x72\x1b\xb7\x35\x50\xb6\xed\x76\x60\xc0\xed\x0c\x95\x7e\xe0\xbc\xde\x5c\x59\x98\x77\x65\xb0\x8c\x39\x78\x22\x4a\xbe\x1e\x6f\xaa\x7a\x72\xeb\xe0\xc8\x0e\x83\x40\xd5\x38\x86\x77\x13\x17\x27\x94\x8a\xa7\xca\x41\xdf\x0c\x83\x13\x90\x30\x34\xc5\x7f\xe8\x30\xae\x97\xfa\xdb\x97\x3a\x04\x02\x8a\x57\xba\x6e\x82\x2e\x51\x60\x22\x25\x54\x35\xda\x78\x85\xc9\x0c\xf4\x47\xc3\xf5\xe4\xc9\xe1\x9c\x83\x04\x32\xb8\xe0\xbd\xa2\x6b\xd0\x69\x53\xbd\x59\xe4\xf3\xa9\x3e\x5d\x71\x6f\x74\x62\x9b\x9f\x3a\xf6\x9e\x57\x60\xfe\x9a\x89\x76\x31\x46\xbc\x31\x53\xa1\x18\x26\x8d\x67\x0a\xc3\x10\x71\x1e\xe8\xc0\x93\x1e\x81\x22\xf0\x62\x2d\xd6\xcf\xb9\x2a\x54\x64\x6f\x13\x2e\xa7\xe3\xe4\xfe\xb4\xb4\x3d\x69\x8c\xc9\xac\x47\x68\x84\x02\x38\x9d\x2a\xfd\xe3\x21\xd2\xd5\x87\x37\xa8\x83\x6f\xef\x8b\x72\x80\x89\x8e\xcb\x28\x89\xd6\x3a\x00\xbf\xbc\xec\x8d\x87\x78\xd2\xeb\x10\x98\x5f\x53\xec\x96\x75\x48\x7f\xdd\x87\xde\x2b\xea\xbb\x16\xcb\x23\xfb\xe4\x49\x2c\x8a\xd6\x75\x45\x32\x98\x97\xc7\xdf\x36\x45\x98\xa1\x50\xe0\xa5\xef\x4b\x2b\x64\x9d\x80\x0b\xaa\xb6\x31\xc1\x94\xb2\x20\x4d\x22\x28\x1e\x6f\xb3\x15\x4c\x19\x5d\x04\xc2\x4c\xfe\xb8\xf5\x3a\x6e\xbd\x0e\xee\xb8\x1f\xb7\x5e\xc7\xad\xd7\x71\xeb\xf5\xbc\xb6\x5e\x56\x43\x5a\x2b\xbb\xfe\x92\x23\xd3\xa7\x05\xbb\xd4\x80\x99\x85\xf7\x74\xc3\x03\x3b\xee\x38\x8f\x3b\xce\xe3\x8e\x13\x78\x3f\x3f\xfc\x8e\x73\x87\xbb\x97\x13\xf0\xa0\x22\x6c\x09\x5a\x22\xa6\xab\x42\x40\xf0\xf9\xf6\xca\x80\xe6\xea\xdc\x07\x20\x10\x17\xaa\x84\x79\xcb\x3d\x4f\x55\x99\xc0\x27\xbe\xe9\x71\x0b\xca\x6f\x2c\x42\xbd\x9f\x40\x94\x7a\x4f\xbf\xde\x76\xd4\x46\x97\x16\x24\x7c\x83\x8d\x58\x23\xda\xad\xf4\xc7\x5a\x75\xe9\xa6\x97\x96\x0a\xac\xaf\x1b\xef\xb0\x5c\xa2\xfe\x51\x06\x5c\x16\xbb\x3a\xa1\xab\xa9\x56\xb0\xf5\x82\xd6\xd5\xee\x1f\xfb\x4c\x62\xd3\x1a\x69\x72\x26\x50\x70\xb3\x1e\xf5\x6c\x60\x17\x5b\xe5\x8e\xed\xaf\x2a\xb7\x4f\x1b\xc6\x5d\x8a\x41\x79\xe2\xbb\x1d\x35\xb5\xb6\x45\x31\xbb\xbd\xf9\x39\xee\x02\x8e\xbb\x80\x27\xbc\x0b\x78\xee\x8e\xf8\xc1\x77\x3a\xbe
\xee\x7b\xda\x97\x4f\xcd\xe3\xd7\x0a\x2e\xc4\x33\x0b\x65\x5b\x97\xae\xd4\x3c\x89\xa6\x43\xe9\x78\xd5\xd9\x5b\x9d\x4b\xcd\xcb\xef\x70\x69\x2a\x90\x0b\x2a\xb0\x29\x94\x54\x27\x70\xdd\xaa\x66\x40\xd0\x0c\x09\x6a\x4a\x19\x88\x56\x04\x2e\x70\xa8\x3a\x55\x39\xb4\x2a\xef\x56\xbf\x42\x05\x4a\x7b\xbd\x4d\xea\xc1\x56\x30\x5f\xcf\x77\x85\xaf\xe0\x75\xb6\xf6\x71\x53\x3a\xc8\x43\xe5\xb0\xc9\x5f\xc5\xd0\x6c\x2f\x33\xcc\xc7\x19\x01\xff\xc3\x0e\xd2\x40\x73\x19\xa8\xaf\x02\x55\xfc\xee\xb8\x03\xdf\x51\x2a\x02\xa5\x49\xd3\xa6\x06\x54\xc5\x2b\x0e\x13\xad\x8c\x13\x8e\xc2\xc0\x26\x48\x07\x19\x14\x5c\x43\x51\x72\x72\x0d\xef\x72\x1c\x1e\xc4\x6d\x75\xa0\x25\x02\x13\x84\x48\x0e\x56\x56\x85\xe8\x52\x93\xc5\xd7\x03\x9f\xcb\x58\x84\x15\xc9\x7e\xf2\x55\x5e\x7f\x73\x83\xf4\xe3\x54\x84\xd4\x90\x94\x52\x2a\x15\x64\x59\x01\xf1\xc6\x1f\xba\x45\xf4\xb8\xfd\x30\x28\x61\xd7\x68\xa0\x35\x93\x58\xa9\x8b\x15\x92\x19\xf8\xd8\xf7\xba\x73\xd3\x59\xe9\x12\xe9\x02\x42\x35\xa3\xcf\xa0\x31\x9d\x09\x15\x32\xc2\x1d\xdc\x18\x53\xd0\x54\x83\xec\x14\x07\xa7\x52\x3a\x49\x96\xd7\x69\x46\xf0\x8a\x17\x61\x6a\x4a\x30\x37\x55\xc0\x36\xfe\x8c\x6b\xd2\x2f\xe7\x88\xa1\x02\x29\x3f\xa8\x5c\x7c\xb8\x48\x62\x94\x4d\xed\x63\xbf\x30\xa0\x3c\xd5\xf3\x17\x95\x92\x0a\x31\xcb\xeb\x43\x36\x11\x0d\x33\xd5\x6a\x3a\x64\x35\x9b\xbe\x8c\xae\xfd\x45\x54\xca\x34\xfd\xa5\x21\x11\xaa\xa7\x9f\xa5\xb1\xd6\x09\x70\x05\xc2\x6a\x11\xc0\x20\xc7\x59\xdd\x24\xe2\x52\xac\xbd\xfe\xea\xc5\xfb\x14\x48\x8b\xc4\x80\x54\x71\x05\x41\xf5\x85\xd3\x87\x04\x5c\x37\x97\xd3\x6a\x4e\x16\x50\x93\x8a\x86\xa1\x40\xd5\x0c\xd1\xab\xaa\x6e\x5a\x42\x69\xdc\x3c\x6f\xe1\x51\x77\x09\x8f\x97\x7e\xeb\xef\xf6\xd6\xec\x7b\x0f\x9c\x2f\x68\xc6\xf9\xac\xdc\x58\xbb\x8f\x45\x24\x4a\x28\x26\xa2\xfe\xdc\xdc\xb0\xe2\xc2\x3c\xb8\xff\xe3\xf3\x42\x36\xf4\xbf\x5b\xd5\x77\x2a\x9c\xc3\x92\x8a\xc1\x77\xd7\xb1\xfd\xae\x22\x9e\xda\xb1\x83\x71\x9c\x7d\x68\x79\x9b\xc5\x5e\x5c\x5f\x5c\x1a\xa8\x16\xe5\x6e\xf8\x0a\xe5\xa5\x01\x5c\x79\x69\xc5\x27\x07\x88\x9d\x82\x94\x54\x42\x51\x3f\x83\xaa\xa0\x55\x87\x4e
\xeb\xa8\x7e\x63\xf0\xbe\x3d\xb4\xf3\xb6\xd4\x2f\x92\xb0\xee\x80\xad\xdb\x07\x66\xfb\xd7\x4e\x09\x42\x0e\x1a\x5e\x95\x08\x8d\xbc\x27\x1e\xc1\x50\x7c\xfd\x6a\x06\xf7\xed\x5b\xc5\x65\x2d\x4f\x27\xaf\x17\x48\xcc\x69\xa4\x31\xf2\xcc\xb3\xa7\xaa\x1a\x27\x79\x25\x9c\x1b\xdc\x96\xba\xa7\xe5\x55\x6d\xd5\xe3\xad\xb5\xcf\x13\x5d\xde\x00\x24\x90\x21\x22\x3c\x15\xec\x4d\x55\xca\x55\x50\x33\x5f\xf9\xdd\xf5\x13\x9b\x33\xbf\xc7\xc9\x70\x91\x50\x26\x36\xdf\x02\xaa\xb3\xcf\x7a\x6b\x4f\xa3\x83\xde\x90\xe7\x87\xf0\x34\xda\x14\x52\xa6\x66\x52\x11\x47\xb6\xd1\x68\xd2\x08\x01\xdb\x2e\x57\xde\x6a\x47\xa0\xbe\x53\xd4\xb1\x75\x7a\x7b\x6d\x04\x6f\x83\x8c\x38\x73\xda\x8b\xa0\xb4\xf7\x8f\x2c\xbb\xbb\xe1\xdc\xd6\x5a\x8f\x56\xc6\xa3\x42\x2c\x36\xb1\x61\x9b\xeb\x6d\xf7\x12\x61\x15\xec\xb1\x72\x41\xc5\x91\xad\xbb\x3a\xba\xc1\xba\x1d\x59\x7e\xf3\x48\x60\x19\x52\xc5\xbf\x91\x7e\xe6\x93\xff\x48\xc1\x4a\x67\xe4\xcb\x8d\xb3\xa0\x14\xcc\x21\x8b\x34\x10\x2e\xe4\x94\x00\x38\xa1\xa9\x00\x2b\x54\x0e\xf4\xd8\x9b\x35\xde\x1f\xed\xb4\x74\x04\xd5\x65\x55\x36\x17\x2a\x79\x94\xdd\xdc\x2e\x0a\xf6\xfa\xc5\x65\xba\xbb\x9d\x73\xd1\x91\xda\xc0\xef\xeb\x76\x40\xb5\xcb\xde\xa5\x4f\xfa\xdc\xaf\x14\x66\x60\xf1\x0c\xb1\x92\xc1\x3d\x30\xc8\x51\xe1\x04\x58\x50\x2e\x00\x25\x85\x17\xe9\x22\xfb\x5e\x67\x0e\x22\x27\x24\x11\x80\x7e\x03\xcc\x01\x43\xff\x4a\x71\xe1\x92\xa0\xfe\x06\xd5\x41\x28\xdb\x06\xf6\xb5\x8c\x23\x95\xff\x1c\xb6\x1e\xe6\x0e\xf6\xed\xca\xf9\x30\x82\x9d\x15\x86\xb7\x1e\x08\x68\x52\xdb\x24\x3f\x5c\x7d\xd8\x88\x69\xcf\x91\x00\x82\x7a\xdd\x58\x58\xee\xe7\xb5\xbd\x37\x14\xed\xb9\xfe\xee\x26\xfe\x64\xab\x5a\x95\x90\xaf\x0c\x1f\x05\x94\x35\x38\x1e\xd3\x3f\x39\x3c\xe3\x90\xa8\x18\x5d\xb7\xac\xbf\x71\x35\x15\xf7\x23\x34\xc5\xc4\xa0\xf0\xc6\x58\x2e\x51\x7f\x8d\xe4\x95\xec\xd5\x09\xb9\x29\xd4\x63\xc1\xdb\x75\xfe\x1b\x58\x62\x26\x52\x18\x83\x05\x0c\xe7\x98\x20\xa7\xd1\x94\x15\x24\x1c\xc1\x70\x5e\x1c\x48\x75\x4d\x09\xe9\x2c\xda\x8a\x12\xb8\x1c\xc3\x69\xf8\x07\xb0\x90\xbc\xe8\x81\x6b\x0a\xc4\x03\xb5\xb4\xd7\x15\xd5\x4a\xb2\xad
\x2a\xfb\xc4\x31\x7d\x50\x97\x21\x5e\x6f\x29\x37\xef\xb5\x34\x07\x9a\xe8\x19\x25\xcb\xb7\x30\x86\x0b\xc0\xb2\x41\xb7\xe0\x3e\x61\x0d\x51\xf3\x93\x99\x1e\xe8\x17\x8e\xf1\xf3\x43\x1b\x3a\xad\xe9\x34\xbb\x32\x9b\x53\x2e\x74\x09\x93\x72\x14\x2c\xb0\x96\x0f\x45\xb5\x94\xac\xee\xbe\x30\xb9\xf7\x3e\xd9\xb8\x2d\x9c\xb4\xc0\x7f\x14\xc9\x23\x55\x74\xb1\x4f\xaf\x2f\xfd\x98\x65\x4b\x5d\x21\x8c\x57\x52\x6b\x04\x99\xd6\x08\xb4\xd6\x78\xe5\x97\xc2\x50\x4a\xc7\x97\x9d\x32\x2e\xbf\x49\x88\x6c\x6a\x1e\xbc\x98\xd9\x6d\x0c\x44\x55\x84\x77\xfe\x73\x58\xd8\xf3\xaa\x83\xe8\x6d\xe6\xb8\x51\xdd\x1c\x68\xaa\xee\x2d\x3b\x4c\x7a\x14\xa6\x62\xfe\x36\x08\x63\x8c\x88\x08\x70\xd4\x50\xf5\xde\x48\xb1\xfa\xdd\x5e\x98\xfd\x9e\xdf\x1e\x66\xa2\xa4\x10\xc0\xec\x1d\xb1\x92\xd7\x2c\x83\xb7\x20\xd0\x37\x7d\x39\x04\x30\x50\x43\x00\xc3\x73\x0d\xed\xdb\x1f\x95\x0c\x9c\x1e\xab\x7e\x6e\x18\x35\xb2\x73\xc3\xfe\x68\xa8\x60\xd3\xc5\xaa\xff\x00\x19\x52\x38\xf4\xba\x87\xd6\x46\x4f\x92\xab\x95\xc1\x73\x9c\xeb\x1d\xec\x25\x00\x30\x41\x0d\xef\x53\xf1\x99\x30\x04\x23\xf9\x87\x7a\xd0\xeb\xb4\xcc\x55\x8e\x42\x86\x9a\x6c\x48\x1c\x60\xc3\xba\x70\xeb\x8e\xd5\x0a\xac\xd8\x74\xc8\x49\x1c\xbe\x66\x60\x71\xfb\xd4\x2a\x89\xde\x61\xc7\x56\x79\xf4\xc5\xf6\x2d\x53\xe9\x37\x65\xd0\x37\xdc\x55\x95\xe5\x5a\x1f\xdb\xea\x8f\xdb\x61\x0c\xda\x9b\xae\xbc\xca\x59\x56\x2c\xc7\xf8\xde\x40\xd0\x06\xf7\xf4\xa3\xd2\x98\x9a\xad\x7d\xfb\xb6\x05\x24\x70\x86\xd8\xc6\x1c\x82\xfc\xda\xaa\x8c\x64\xb8\x0b\x3c\x45\x15\xa5\x5a\xa4\x2c\xaf\x84\x6f\xfc\xce\xa3\xd0\x3d\x43\x31\x16\x22\x6f\xeb\x12\x0e\x0a\x22\xb9\x71\xd7\x26\x5d\x38\x53\xf2\xca\x09\x48\xb9\x47\x2b\x2d\x41\xd2\x50\x09\xca\x72\x9f\xef\x52\x8d\xcc\x44\xda\xfd\xf6\x69\x5c\xa2\x6d\x55\xbe\xc4\x61\x32\x1e\xea\x41\x3d\x8f\xc4\xdb\x40\xbc\x9d\x58\xe1\x39\x46\x0c\xb2\x70\x8e\x43\x18\xd7\x68\xc0\x9a\x0e\xab\xd4\xdf\x7a\x83\xdc\x51\xdc\xfb\x5d\x9c\x0d\x3e\x57\x14\x7c\xc3\xb9\xe6\x61\x3b\xb7\xaa\xc1\xa3\x07\xef\x6c\x77\x9b\xb1\xd3\x28\x1d\xf7\xb0\xfd\x04\xdc\xdd\x9c\xdf\xbc\x03\x3f\x4d\xce\xde\xfe\xfa\xeb
\x9b\xb7\xbf\xfe\xf9\xcd\x7f\xfe\x0c\xb4\xcf\x24\xff\x21\x28\x14\x94\x81\xc1\xd5\x50\x9d\x7f\x12\xaf\x80\x86\xd9\x74\x6c\xa6\x68\xaf\xb9\xb0\x34\xbb\x97\xf7\x33\x73\x2a\xf3\x84\x95\x2e\x0a\x58\x4a\x7a\xc5\xf2\xe9\xf5\x8a\x78\x77\xdb\x2b\x27\x14\xda\x84\x91\xa7\x24\x3b\xda\xc1\xdc\xd4\x17\xc4\x24\xf7\xa7\xe8\x34\x7f\xd2\xd5\xe4\xca\x35\x31\x0d\xed\xc9\xca\x9b\xd7\x7f\xf9\x15\x84\x73\xc8\x60\x28\x10\xe3\x40\x7a\x62\xfa\x08\x5a\x2e\x87\x78\xa5\xd1\x66\x6e\x3f\x0c\xde\xfc\xf2\xeb\x9f\xdd\xce\x2e\x74\x30\xae\xce\xd5\x79\x07\x5e\xb2\x94\xbc\x36\x9d\xbf\xcc\x9f\xab\x4c\x51\xbe\x4d\x49\xb9\x6e\xfb\xe6\xec\x64\xc9\x80\x16\x99\xc9\xce\x4a\x31\x3c\x9e\xa6\x24\x94\xac\x5a\xc3\x64\xfb\x48\xcf\xfe\x72\x28\x56\xf7\xa5\xd3\xc8\x5e\xeb\xf3\xb4\xc8\xb3\xe8\x9a\xb7\x1f\xec\x48\x0b\x0c\xb6\x03\xd7\x4d\x54\x15\x55\x8e\x5e\x73\x44\x38\x16\x78\x89\x14\x6b\xd7\x31\x7f\x1d\x8f\x65\xdf\x6f\x36\x71\x57\x0d\xcf\x8e\x8e\x7f\x28\xd2\xb1\x51\x1e\xba\xec\xc2\x4e\x64\x93\x37\x55\xab\x1e\x67\x44\xba\x38\xc5\xed\xf5\x09\xe8\xab\x13\xc7\x3f\x9f\xbd\xf9\xdb\xd9\xdb\xb7\x8a\xa2\xea\x88\xcd\x9e\x90\xea\x8a\x46\x53\xca\xf4\xb1\x2f\x8b\x11\xe7\xe7\x28\x89\xe9\x4a\xc5\x5e\x4b\xad\x38\xa5\x6c\x21\x49\xfb\x3b\x4c\xb0\x89\x59\x36\x23\x83\x09\xe6\x72\x74\xbf\x3b\xef\xf3\xab\xcb\x99\x61\x81\x09\x0a\x61\xca\x11\xe8\x8f\x86\x97\x79\x89\x2a\x8d\x34\x91\x0b\x4e\xb6\xa6\xed\xa6\x49\xe0\x85\x1b\x66\x93\x0f\x31\x88\xb2\x31\x36\x7f\x77\x92\x5c\x90\x99\x3a\xbb\x6e\xff\x6a\x98\x24\x01\x52\xad\x6b\x2d\x72\xab\xd4\xff\xf5\xd6\x98\xf3\x38\x08\x11\x13\x78\x8a\xc3\x9a\x58\x83\xf1\xf8\x6a\x50\x7a\xa2\x43\xa6\x97\xf3\xd8\x19\x5f\xf7\x0e\x38\xaa\xd7\x8b\x66\x4b\xa0\xb1\xc7\x1e\xcb\xf7\xd2\x74\xd7\xf5\x88\x12\xa7\xf0\x64\x83\xea\xb0\x4f\xc6\x7d\x36\xe5\x63\x85\x48\xe4\xf0\x71\xf7\x2e\xd7\x95\xcd\x4c\x59\xfc\x09\xd6\x65\x68\xa7\x2c\x0e\x16\xb0\xe9\x65\x5f\x3f\xb7\x7a\x36\x73\xce\x70\xf2\xf3\xed\xd5\x27\x98\x38\xd5\x93\xe5\x26\xd5\xbd\xa3\x5b\x68\x66\x7a\xdd\xa9\xcc\x8f\xcf\xb7\x57\xb6\xaf\xc2\x3d\x7c\xe3\xe3\x11\xfd\xf2\x3d\x1d\x8e
\x1c\x2c\xac\xb7\xbc\x90\x78\x93\x95\x34\x7e\xe2\x4b\x89\x1f\xd7\xd2\x8f\xba\x96\xfc\x73\x35\xdf\x5d\x68\x1a\x12\xd4\xcf\x2e\xae\x2b\x7d\x0a\x27\x42\x48\x91\x15\xb2\xac\xc2\x90\xd7\x0d\x4c\xc5\x1c\x11\xa1\xdb\x98\x9c\x37\x29\xb3\x60\x82\xc4\x03\x42\x44\xf9\xf6\x5c\xb9\xe2\x85\xd8\x11\xc4\x7a\xa0\xef\x9f\xd4\xc6\x08\x9a\x18\xa2\xf1\xf8\x0a\x38\x93\xca\xdc\xf8\xca\x7c\x91\x0d\x67\xc2\x55\xbe\xd2\x01\x18\xa6\xf7\xde\x3c\xd6\x60\x0b\x75\x80\x18\x3c\x2e\x5e\xa0\x6c\xbb\x3a\xc6\xe3\x2b\xfd\xaa\xc2\x02\x31\xe9\x9a\x3e\x17\x39\xa7\x21\x56\x71\x05\x6a\x13\xec\x74\x53\x54\x98\x6e\xe4\xc9\x54\xe7\x5f\x22\x71\xea\xf5\xd6\xa4\xb5\x1e\x86\x6c\xae\x73\x17\xc8\x4a\xf1\xbc\x34\x77\x50\x93\x34\xbe\x91\xe7\x87\xc4\xb4\xd8\xbb\xad\xf3\x35\x68\xbd\x5e\x7a\x32\x66\x4d\xdb\x8b\xce\xd9\xb2\xfc\xba\x47\x67\xb1\x05\xc5\x53\xb1\xcd\x17\x14\xa6\xa5\xb1\x2f\xce\x5a\xcc\x62\xf4\x4c\x45\x5f\x75\x51\xc6\x50\xa8\x82\x7b\xe4\xca\x22\xa8\x18\xab\x25\x17\xd6\x9c\x72\x71\x9b\xc6\x88\x83\x85\x24\x83\x5f\x86\xef\x83\x49\xdb\xbe\x54\x29\xa6\xca\xcc\x7d\x82\xc9\x29\xc0\x22\xaf\xa9\x0e\x60\x59\x65\x98\xe8\x1d\xaf\x2f\x7b\x7c\x46\x59\x16\x65\x35\x49\xc3\x7b\x24\xaa\x5f\x79\x6b\x13\x93\x77\xf1\x52\xff\x15\xc3\xa9\xa5\xa2\xc2\x7c\xe8\xeb\x43\x1f\xcc\x01\x8c\x34\x24\xba\x82\xb1\x70\xc3\x4a\xa3\xa5\xae\x25\x6f\xe0\x37\x7c\x5d\x67\x0c\x54\x8c\xef\x91\x1a\xec\x2d\x7a\x60\x58\x20\x7e\x0a\x90\x08\x7b\x40\xc0\x7b\x04\xd0\x74\x8a\x42\x01\x12\x86\xa9\xc2\xf6\xe0\x88\x44\x3a\x32\xd6\x57\x9c\x0c\xfd\x2b\x45\x5c\x14\xa6\xd4\x03\x1f\xe9\x03\x5a\x22\xa6\xf2\xf7\x0b\x12\x80\x9d\x5c\x78\x5f\x6f\x56\x4c\xd2\xc4\x65\x19\x85\xa2\xd4\xe4\x03\xc2\xb3\xb9\x40\x91\xef\xb8\x70\x9f\x64\x03\x4a\xa4\xb6\x43\xf1\x4a\x0d\x81\x39\x5d\xda\x97\xf3\x75\xbd\x9d\x56\x8a\x82\xb2\xc2\xaa\x2a\xa3\x6b\x89\xf5\xa9\xa0\x16\xd7\xc2\x64\x2b\xe7\xf7\x99\xc5\xb7\x48\x8b\xba\x14\xae\xf2\xac\x7b\xd5\x83\xf2\xfa\xca\x1c\x02\x5f\x1e\x4f\xc0\x67\x8e\xc0\x4b\x15\x0e\xf6\xd2\x80\xb2\x4c\x90\x50\xa8\x2b\x88\xcd\x5c\xfb\x36\xcb\xd7\xc8\x42\x8a\xab\x5a\x8d\x5e\x57
\x16\x7a\x1f\x2c\xa8\x3a\xcb\x84\x44\xcd\xd2\x11\xe4\x55\xa2\x42\x1a\x7f\x2f\xa8\x07\x73\x30\xda\x2c\x40\xcd\x8f\x3b\xdf\x26\x7a\x6b\x0d\x99\xc0\xc1\xe3\xb6\x12\x28\xe6\x81\x52\x55\x88\xf5\xbe\x5b\x8d\x66\xa2\x68\x60\x3c\x72\x45\xa9\x78\x2a\x5f\xa6\x95\xa3\x3d\x4d\x9e\xac\xca\xd1\xec\x23\x28\xe6\x9f\xf4\x88\xe4\x6a\xd4\x83\x33\x8f\x2a\x95\xf0\x8a\xab\x9e\x80\xe4\x3e\x2e\x96\x3a\x7d\x8e\x3a\xf6\x20\xcb\xc6\x6b\xf4\xfb\x3a\x39\x39\xae\xa7\x9a\xf5\xa4\xfe\x60\x69\x8c\x4a\xd7\x75\x9b\x57\x54\x31\x74\x7a\x93\x43\xa2\x03\x99\xd2\x18\x15\xc1\x75\xcc\xfa\x79\xfe\xab\x44\x79\x22\x6c\x3b\x17\xc4\xba\x1d\xfe\x7e\x77\x4b\x17\xc4\x71\x3b\x4a\xfe\x61\xa5\x0b\xc2\xab\x7c\x0f\x6f\x2a\xda\xd1\x28\x6e\xbc\x36\x3a\x1d\x6b\x1c\x0d\x9f\x9a\x8d\x9c\x8e\x2a\x47\xa3\xb2\x9b\x82\xd3\x91\xe6\x1e\xc5\xa9\x23\x2d\xbe\xa7\xb6\xd1\xbf\xe8\x94\x4f\xb1\x46\x39\x96\x16\xfd\x51\x3d\x6e\x54\x8f\x8a\xfb\x81\x5e\x83\x3d\xb3\x7a\x82\x05\x66\x8c\x32\x73\xde\xd2\xab\x4e\x70\x04\x1b\xd3\x7a\x4c\xae\x5a\xa5\x4e\x75\xe2\x3e\xe5\xd2\xd5\x2f\x54\x87\x68\xa5\xb0\xb3\x75\x7c\x59\x7f\xd8\xb1\x86\x1f\x87\x39\x91\x6c\xc2\x03\xbb\x04\x8b\x79\xa5\x7c\x27\x7c\xb0\xde\x62\x1d\x3f\x7a\xe0\x3d\x9a\xca\x05\x38\xa5\xec\x01\xb2\xa8\xb8\x47\xf4\x75\x6c\x81\x37\xa7\xd9\x49\xa6\x3d\xc8\x04\x30\x49\x62\xab\xdc\x18\x8a\xd1\x12\x16\x42\xf6\xe7\x08\x46\x88\xf5\xcd\xd6\x33\x4f\xaa\x83\xd2\xd3\x64\x42\xfb\xaf\x79\xae\x9e\x79\xd3\xdf\x15\x91\x7e\x18\x51\xd1\x02\x22\x65\x85\x1f\x17\x6c\xf7\xb8\x70\x5c\xb2\xc7\x25\x1b\xbd\x53\x55\x61\x76\xb0\xdd\xf0\x2f\x25\x72\xf7\xdc\xdb\x8d\x63\xae\x5f\xf7\xec\xb7\x16\x07\x3a\xb7\x72\x79\xf9\x83\x7b\x91\xfb\xbc\xb3\x91\x14\x86\xce\x9e\xa8\x08\xc2\x5e\xf8\xba\x35\xde\x8e\xf3\x86\x03\x5c\xa1\xec\xe7\x92\x49\xc7\x58\xff\x34\x39\x7b\xfb\xe6\xaf\x6f\xff\xfa\xeb\x5f\xff\xfa\xeb\xcf\xef\xc0\x50\x05\x6e\x01\x3e\x87\x0c\x49\x73\x23\xe4\xd6\xda\x46\xfd\x4d\x29\x03\x84\x3e\x80\x94\x08\x1c\x83\x29\x8d\x63\xf9\x47\x92\xf5\xa7\x56\x50\x76\xda\x81\xe0\x42\x43\x52\x2f\x92\x18\x39\xd9\xc4\x95\xb1\x61\xaf\xf5\x2b\xb3
\x37\xd6\xca\xcc\x6e\xc2\x00\x5d\x3c\xff\xca\x28\x96\x4a\xe0\xfe\x0e\xc5\xff\x31\x77\x7c\x07\x89\x51\xd9\x13\x6b\xa4\x3f\x54\xc9\x10\xe7\x8b\x03\xe3\xca\xaa\x31\x76\xf7\x26\xf5\x3b\x50\x4d\xf2\x74\x4a\x39\xc5\xcc\x83\xab\xf0\xcd\x9f\x34\xba\x08\x41\x7f\x88\x60\x4e\x93\xa0\x02\x14\x63\x2d\x90\xbc\x05\x4e\x70\x3d\xaa\x39\x24\x51\x6c\xbc\x2a\xb9\xa3\x49\xa4\xd9\x15\xbc\x82\x62\x7f\x88\x8f\x34\xb1\x7d\xb4\xa1\x5c\x45\xfd\xb5\xc3\x93\x2e\x9e\x34\xa4\x5a\xdf\xd9\x50\xe8\xe3\x6d\xe9\xd8\x4b\x07\x49\x6e\x17\xde\x5b\xc0\x0a\x55\x8f\x05\xfd\x4f\x8b\x55\xe1\xd2\xd8\xdf\x35\xd4\xd0\x1b\x80\x6b\x2a\x90\x23\xc3\xa1\xf2\xca\x34\xc8\x8e\x0a\xac\xb2\xf8\x3a\x5e\x77\x11\xe2\x02\x13\x65\xd9\x6f\x15\x8a\x3c\xe6\x00\x82\x24\x9d\xc4\x38\x04\x3f\x11\x4a\x5e\x5b\xf4\xf6\x9f\xc1\x70\x04\x06\xc3\xf3\x5b\x0d\x37\x5f\xcb\xe0\xab\xf7\x6d\x78\xfb\x21\xa3\xcd\x6d\x1a\x77\x8a\xc3\xcb\x84\x04\x22\x25\x04\xc5\x2d\x54\x89\x99\xd5\x97\xd1\xf5\x9d\x6a\xda\x64\xa9\xd4\x10\x32\xeb\xa3\x0d\x39\xb3\x46\x87\xa1\x64\x4d\xdc\xbb\x32\x77\x45\xa5\x13\x48\xdf\x72\xff\xbe\xb1\x5b\xc4\xa3\xbe\xbc\x47\x97\x5c\x1b\x35\xae\xe7\x62\x5f\x2b\xc2\x07\xab\x41\xc4\x4c\xe9\x9a\x0a\xf4\xd2\x27\x69\x70\xf7\x2d\xd1\x79\x25\xb7\x7a\xd9\x1e\x16\x1e\x79\x14\x98\xe9\x4c\x8e\xbf\x7e\xd5\x23\xad\x05\x9c\x56\x9b\x2a\x95\x3f\x66\x4b\x1c\xa9\xf0\x6a\x4a\x5e\x09\x00\x43\x91\xaa\x1b\xcf\x0e\x03\x4f\x97\xea\x39\x75\xb3\xce\xd0\x26\x50\xe9\x4a\x3b\xa7\x4b\xc7\x25\xe4\xc9\x59\xa0\x42\x99\xdf\x75\x45\xcf\xb2\x79\x56\x17\x12\x6b\xe7\xa1\xae\x29\x99\xb6\x5b\x0a\xec\x5d\xb1\x10\x58\x59\x15\x4e\x4b\xe9\x75\xff\xae\xcb\x16\xf3\x47\xd3\x38\xad\x2a\xc8\x39\x55\xd4\x30\x07\xd7\xfd\xbb\xac\xb0\xd2\x53\x2a\x9b\xb6\x49\x9d\xe5\x35\x29\x7a\x2d\x91\x5e\xfc\xca\x18\xd7\xfd\xbb\x67\x52\xef\xc2\xf7\xde\x54\xa1\xd7\xa6\x39\x3c\x52\x48\x86\x23\x6e\x82\x61\x74\xd1\x7e\x3c\xd5\x95\x66\xfb\xa6\x6e\x94\xae\x2c\x92\xc3\xb3\x82\x4f\xfd\xeb\xcf\xfd\xab\xe0\xe6\xfa\xea\x1f\x4f\xa4\x56\x94\x1f\x89\xcf\x20\x26\x41\x3b\x3a\xe5\xb9\x4e\xc3\x91\x9b\xd8\xa4\xae\x59\x54\x87
\xe6\x16\x92\x23\x49\xcf\x2c\x10\x48\x51\xd4\xeb\x89\x0b\x28\x70\xe8\x16\xdf\x2a\xd5\xe9\x83\x9c\xe3\x19\xb1\xc5\xd8\x50\x51\x52\x3b\x4c\xe7\xbd\xdb\xae\x04\xad\xdb\xeb\x8d\x50\xb7\xf7\x7b\x47\xeb\x95\xff\x6c\xb0\x5e\xef\x2f\x47\xaa\xe2\xc9\xb3\x34\x61\xc5\xad\x5d\x13\x5a\xe5\xf5\x12\x55\xce\xa9\x25\x0f\xe6\xc5\xd4\xbb\x1a\xf2\x64\x7b\xc5\xf6\x74\x2a\x6e\x33\x77\x4d\xb0\x13\x30\x62\x08\x2d\x12\x81\x97\x48\x57\x93\x5d\x22\xa6\x42\x5e\x15\xd0\x75\x76\xcf\x9f\x1f\x34\x44\x88\x27\x58\x68\x52\xc4\x30\xbc\xd7\x45\x8d\xee\x09\x7d\x20\x4a\xdb\x9c\x3a\x7d\x43\xb7\x40\x22\xe2\xf6\xac\x0e\x49\x49\xd7\x4b\x0a\x0a\xb0\xc0\x04\x2f\xd2\x85\xba\x82\x9f\xc4\x48\x57\x62\xb0\x6f\x83\xe0\xb7\x74\x82\x18\x41\x6e\x6e\xec\x09\x18\xe7\x37\xff\x16\x75\x29\xa7\xfd\x89\xf3\x60\x7e\x6e\x9b\xf5\xa9\x4c\x87\xaf\xb1\x55\x82\x2c\x26\xbe\xc5\x38\x01\x51\x8a\x9c\x67\x73\x3d\xe7\xe0\xb0\xb8\x81\x2c\x27\xca\xa0\x18\xa4\xba\x37\x7f\xf9\x5b\xef\xed\x9f\xff\xd4\xfb\xa5\xf7\xcb\xd9\x9b\xbf\xa8\xac\xbb\xf8\xb5\x54\xd7\xfa\x84\xd7\xdc\xfc\x2b\x19\x22\x15\xaf\x2e\x8e\x8f\x44\xae\x0b\xa5\x6b\x92\x4e\x29\x03\x97\x03\x07\xbc\xd6\x2b\x6b\x1b\x14\x4b\xbb\xaf\x8b\x08\xb0\x26\xd1\xfb\xae\x5c\xcb\x9c\x0b\xe6\x47\xee\xac\xbd\xd1\xc8\x4b\x5b\x9a\x04\x8f\x7c\x1d\x61\xc2\x71\x54\x80\xf3\x1b\x19\xc0\x1a\x3f\x78\x43\x39\x47\xc3\xd1\xf2\x4f\x1e\x50\x51\x6f\xef\x46\x8f\xa3\x30\x65\x58\xac\xd6\xdc\xe3\x8e\xcd\x23\xde\x3d\x6e\xe3\x9b\x3e\xee\xb6\x3e\xc8\x2d\xeb\x7e\xec\xf4\x1e\x59\xa2\x03\x07\x96\x49\x18\xcc\x29\x17\x81\xdf\x9b\xcf\x1a\xf5\xe8\x97\xd1\xe0\x23\xe5\xc2\x47\x62\xdd\x5c\x13\x26\x53\x7a\xea\xba\xc6\xbc\x1d\xf4\x47\x43\x1b\xa2\x76\xd8\xd2\x6d\x8f\x42\x60\x13\x69\xd1\x88\xc6\x46\x19\xaf\x25\xb3\xcb\x30\xe5\x84\x15\x5e\xf0\x84\xe8\xdf\xd0\x23\xab\xa6\x20\x58\x8b\x4d\x7c\x40\x28\xe1\x2d\xa5\xaa\xb4\x10\xeb\x44\xcb\xc7\xf6\x5d\x87\xfb\xdb\x31\xdc\x6f\xab\xae\xcd\x20\x3b\x77\x77\xa4\x0f\x4f\xd4\x03\xe7\x98\xdf\xd7\x41\x53\xe8\xa0\x24\x07\x76\x1d\xb4\xbf\x5c\x92\xad\x2b\x31\xa2\x2d\x71\x9a\x6f\x95\xcf\xfd\x81\xec\x16\xe7
\xb9\x16\x09\xb8\x84\x95\x5d\x87\x63\x5c\x05\x5e\xbd\x69\x97\xb0\x11\xc0\x78\x1d\x78\xf1\xb6\xd0\xd9\x8f\x88\x00\xbd\x91\x64\x5a\xdf\xc1\x30\xa4\x29\x69\x85\x42\x6e\xcb\xbf\xe8\x96\x5a\xc2\x6c\x0d\x7b\x87\xaa\x36\x40\xdc\x7e\x35\xc3\x4b\x44\x24\x05\x24\xbd\x4b\xa9\x7e\x70\xc2\xb3\xb2\xed\x46\xe8\x80\x01\x11\xb4\xbb\x87\xfe\x0c\x11\x51\x7a\x3d\xe6\x6a\x04\x35\xd0\xd2\xa6\x6d\x5f\x3f\xdc\xb0\x32\xc4\x27\xbf\x55\x6b\x46\x61\xb8\x68\x77\x38\x84\x16\x10\xc7\x95\x5c\xcc\xf5\xc0\x91\x91\x4f\x8c\x91\x7b\x74\xfc\x9e\x18\x68\xa5\xb5\xc9\x1d\x03\xaa\xd4\xc3\xb2\x2f\xef\x02\x4c\x65\x09\x91\x6a\x2d\xd0\xd2\xe1\xd9\xd9\xa5\x90\xe3\x9d\xf1\xa0\x78\x45\xb5\xfe\x02\x6b\x4f\x4c\x68\x55\x15\x26\x1f\xf2\x56\x45\x61\x0a\xcd\x5b\xd6\x84\xa9\x2c\xbf\x5f\xaa\x0c\x03\xb2\x44\x99\x01\x25\x3a\x8b\x9f\x7b\x0c\xdd\x0e\xfa\x36\x7b\x77\x37\xfd\xfc\xca\x1b\xd2\xe6\x21\xd8\xea\x08\x4d\xce\xd0\x2d\x65\xa3\x8f\xd8\xec\xb4\x73\xc0\x40\x5c\x11\xd0\x8a\xb9\x60\x78\x92\x0a\x14\x81\x05\x8d\x90\x8a\x83\x55\x7b\x70\x87\x70\xbd\xaa\x75\x52\xe6\xe8\x0f\x19\x5f\x66\x60\x4e\x67\x2c\x09\x37\xa1\x9c\x5e\xde\x8e\x06\xfb\x07\x39\x6d\xac\x9c\x75\x07\x97\x2c\x09\x1f\x17\xb9\xb4\xf9\xc9\x4b\x2d\x74\xe9\xa6\xf5\xf1\x59\xb5\xd9\x8c\x44\xda\x02\x7d\x34\x2b\x25\x25\x68\x48\x63\x0b\x6c\x4e\x2a\x5a\xb9\xb8\x15\xb2\x5f\xc9\xf6\xca\x15\x54\xc6\x5d\xed\x2e\xc4\xe9\xde\x17\x50\x67\x01\xb7\x7d\x17\xb0\xdd\xe2\x3a\x42\x6c\xff\xc8\xb0\xc0\x8f\xb2\x66\x3a\x8a\xad\xfd\xbd\xab\xe6\x88\xa6\xfd\xc3\x2e\x9b\x23\x9a\xf6\x11\x4d\x3b\x63\xe3\x11\x4d\xbb\x92\xe7\xcf\x13\x4d\xdb\x98\xb5\x42\x9e\x6d\x85\x4d\x2b\xa4\xb8\x1e\x6a\x23\x55\x80\x7d\x10\xde\xd8\xba\x69\xba\x70\x65\x42\xb1\x13\x4b\xd7\x2a\xc5\xd9\xc9\xe1\xcc\xb2\x9d\x55\xda\xa6\x0a\x37\x33\xb8\x95\xfa\x4e\x00\x73\x43\xe8\xec\x1d\xcd\x05\xfe\xf0\x19\xcd\xf5\x47\x35\x3b\x49\xa4\x2f\x13\x47\x5a\x16\x15\xd6\x66\xb2\xa1\x7d\x15\xa4\x29\x9b\xeb\xa8\x1c\x43\x52\x6a\x27\x4b\x78\x1b\xec\x69\xba\xc8\x43\xa0\xab\xe2\xeb\x4d\x26\x74\x0e\xfc\x64\x47\x97\x87\x78\xb5\x2a\x96\xfc\x43\x9c\xfa\x24\x94
\xc6\xf5\x5a\x6a\x94\x7f\xdb\xa1\x00\x6a\x91\x8d\xad\x9b\x1a\x6a\x02\xc3\xfb\x34\xc9\x29\x58\xb3\xdc\xf4\x63\x2e\x07\x36\x8a\xc3\x66\x69\x2d\xf1\x6d\xd7\x02\xfb\xba\xac\x6a\xf9\x13\x52\x83\x79\x90\x90\x53\x5b\xb8\x59\x3c\xaa\xdc\xab\x3a\x15\x87\xb7\x81\xa8\x92\x8e\x56\xb9\x68\x71\x43\x52\x6c\x41\x8e\x8d\x24\xd9\xbb\x7e\x51\x0e\xf3\xfa\xed\xbd\x74\x06\xbb\x76\xa6\x3c\xe6\xf1\x1e\xb6\xef\x1b\x74\x46\x1d\xfc\x99\xa7\x31\xda\xa3\x22\xd6\x6f\x41\x0a\xa7\xbe\xd9\xce\xa1\xf1\x7a\xee\x1c\x94\xe0\x13\xde\x6c\x7b\x3d\x0d\x6c\x71\xe7\x53\x80\xfe\x80\xa1\x30\xc8\xd4\xc7\x6d\xf7\xc1\xb7\xdd\x77\xae\x7e\xa8\xd9\x6d\x97\xee\x28\x2b\xf5\xdd\x71\xaf\xbd\x13\x0b\x23\xc2\x8d\xb7\x2e\x77\x83\x47\xb8\x74\x69\x69\x61\xee\xc2\x4e\xdf\xab\xac\xb3\x47\x7b\x33\x36\x0d\x0c\x5d\xf7\xed\xd1\xfe\x04\xbe\x53\x35\xda\xb6\xbb\x1e\x39\x56\x65\x3b\x56\x65\x03\xc7\xaa\x6c\xc7\xaa\x6c\xdf\x59\x95\xed\x47\xc2\x91\xae\x9a\xde\x7b\xb5\x42\xbf\x63\x76\xba\x83\x2e\x4c\xae\x42\xfb\x7e\x57\xa9\x86\xef\x72\x5f\x76\x51\xcb\xa1\xc4\x83\xd6\x34\x5b\xcb\x85\x66\xb2\x6a\x9c\xe3\xe9\xb6\x54\xdf\xa6\x34\xc3\x77\x51\xbe\x51\xc5\x06\xaf\x45\xa9\x7a\x43\xdb\x8a\x0d\xbe\xb9\xa8\xa8\xde\xd0\xb4\x62\x83\xd7\xd1\xa6\xea\x0d\x4f\x4d\x3c\x8e\xe5\x16\x9f\x80\xf3\x74\xb4\x87\xcf\xd1\x1e\x56\x2e\xbd\xa3\x71\x3c\xb8\xf6\x3b\x5a\xca\xa3\xa5\xdc\x24\x2b\xc7\x42\xaa\x15\xaf\x3c\x16\x52\x3d\x16\x52\xdd\x65\x21\xd5\xa3\xdb\xf3\xec\xdd\x9e\x1d\x55\x5c\x3d\x3a\x3e\x60\x27\xc6\xec\x58\xbf\xf1\x58\xbf\x71\xad\xa8\xec\xac\xe4\xea\x71\xc5\x82\xef\x58\xb1\xc7\x9a\xab\xfa\xe7\xb8\x66\x8f\x35\x57\x8b\xaf\x3c\x9e\xc2\x1d\xdd\xd1\x43\xc5\x70\x2c\x13\x12\xcc\xa0\x40\x0f\x70\x4d\xbc\xd2\x97\xd1\xf5\xa5\xf7\x4c\xe7\x82\xef\xbf\x24\xc4\x8c\xb0\x73\xb1\x1c\xfb\x4f\x74\xf9\x32\xba\x06\x86\x87\xea\x9c\x21\x0c\x51\x22\x0a\xf9\x42\x3f\x7c\x7e\x49\xa9\xca\xd0\xda\xa2\x41\x5b\x89\x62\x56\xae\xa8\xd3\x38\xc5\x3b\x00\x04\x6d\x18\xbc\x64\x7c\xc5\xb2\x8a\x69\x22\xe1\x35\x0a\x48\x07\xbe\x3a\x40\xfc\x52\xf8\x35\x5f\x8b\x87\x8d\x79\xb4\x6c\xc9\x71\x12\x85\x5e\xdb
\xb8\x4e\x35\x2a\x51\xff\x1c\xc6\x79\xfa\x2e\x0a\x37\xa5\x6d\x0d\x3d\x65\x5f\x38\x2f\x28\xa2\xc2\xd0\xf1\x14\x40\xf0\x11\xcf\xe6\xc0\x08\x2f\x8e\xb1\x58\x79\x7a\xca\xc1\x2f\xf6\x4f\x12\x15\x62\x6a\x99\x63\xcb\x4c\xc1\xb7\xac\x05\xd6\x1d\x2e\x25\x08\xb1\xc0\x82\xc8\x6f\xc1\x2f\x55\x4f\x41\x41\xc2\x67\xc5\x59\x5c\x8a\xfa\xd5\x0c\x73\xd6\x79\xfd\xa8\x7a\xe0\x2a\x0f\xa0\x82\xc6\xf2\x05\x17\xa6\xeb\x2d\x88\x6d\x9b\x76\x8f\xe8\xb3\x30\xf9\x2e\x7a\x7f\xec\x83\xcb\xc1\xa8\x11\xb5\x4b\xf2\x9c\x51\x1b\x0c\xa7\xd2\x4e\x2d\x71\x64\xb3\x4a\x9d\x56\x2a\xe2\x1e\xa6\x82\x2e\xa0\xc0\xa1\xba\x21\x48\x39\x2a\x85\xfd\x71\xb8\x40\xbe\xb9\xcf\x52\x49\x87\xe7\x16\x3b\x49\x8d\xbb\x30\xde\x4a\x5e\x5f\x0e\x46\x4f\x7d\x4d\xb5\x2e\xe9\x62\xaa\x7c\xea\xc2\x48\x19\xf6\x6a\xb4\x22\x70\x81\x43\x7b\x03\xd3\xfd\x0a\x2e\xfb\x74\xa2\xfe\x3b\x00\x00\xff\xff\x05\xe9\x2e\xe9\x9f\x78\x01\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\xff\x73\xdb\x38\xb2\xe7\xef\xf9\x2b\x50\x71\xd5\x65\xe6\x95\x23\x4f\x32\xbb\x33\x6f\xf3\xea\xaa\x4e\x91\x1d\x47\x37\x8e\xa3\xb2\x9c\xec\x6d\x5d\x5d\x69\x20\x12\x92\xb0\xa6\x00\x2e\x00\xca\xd1\xe6\xf2\xbf\xbf\xc2\x37\x12\xe0\x17\x91\x54\x24\x8b\x76\xe4\xaa\x99\xd8\x12\x01\x02\xdd\x8d\x46\xa3\xd1\xfd\xe9\x13\x30\xa0\xf1\x9a\xe1\xf9\x42\x80\xd7\xbf\xbc\x7e\x0d\x2e\x29\x9d\x47\x08\x5c\x5d\x0d\x9e\x9d\x3c\x3b\x01\x57\x38\x40\x84\xa3\x10\x24\x24\x44\x0c\x88\x05\x02\xfd\x18\x06\x0b\x64\xbf\x39\x05\x9f\x11\xe3\x98\x12\xf0\xba\xf7\x0b\xf8\x49\x3e\xf0\xdc\x7c\xf5\xfc\xe7\xff\x7a\x76\x02\xd6\x34\x01\x4b\xb8\x06\x84\x0a\x90\x70\x04\xc4\x02\x73\x30\xc3\x11\x02\xe8\x4b\x80\x62\x01\x30\x01\x01\x5d\xc6\x11\x86\x24\x40\xe0\x1e\x8b\x85\x7a\x8d\xe9\xa4\xf7\xec\x04\xfc\xc3\x74\x41\xa7\x02\x62\x02\x20\x08\x68\xbc\x06\x74\xe6\x3e\x07\xa0\x50\x03\x56\x3f\x0b\x21\xe2\x37\x67\x67\xf7\xf7\xf7\x3d\xa8\x46\xdb\xa3\x6c\x7e\x16\xe9\x27\xf9\xd9\xd5\x70\x70\x71\x3d\xbe\x78\xf9\xba\xf7\x8b\x6a\xf3\x89\x44\x88\x73\xc0\xd0\xbf\x12\xcc\x50\x08\xa6\x6b\x00\xe3\x38\xc2\x01\x9
c\x46\x08\x44\xf0\x1e\x50\x06\xe0\x9c\x21\x14\x02\x41\xe5\x80\xef\x19\x16\x98\xcc\x4f\x01\xa7\x33\x71\x0f\x19\x7a\x76\x02\x42\xcc\x05\xc3\xd3\x44\x78\xd4\xb2\xc3\xc3\xdc\x7b\x80\x12\x00\x09\x78\xde\x1f\x83\xe1\xf8\x39\x78\xdb\x1f\x0f\xc7\xa7\xcf\x4e\xc0\xdf\x87\xb7\xef\x3f\x7e\xba\x05\x7f\xef\xdf\xdc\xf4\xaf\x6f\x87\x17\x63\xf0\xf1\x06\x0c\x3e\x5e\x9f\x0f\x6f\x87\x1f\xaf\xc7\xe0\xe3\x3b\xd0\xbf\xfe\x07\xf8\x63\x78\x7d\x7e\x0a\x10\x16\x0b\xc4\x00\xfa\x12\x33\x39\x7e\xca\x00\x96\x74\x44\xa1\x24\xda\x18\x21\x6f\x00\x33\xaa\x07\xc4\x63\x14\xe0\x19\x0e\x40\x04\xc9\x3c\x81\x73\x04\xe6\x74\x85\x18\xc1\x64\x0e\x62\xc4\x96\x98\x4b\x6e\x72\x00\x49\xf8\xec\x04\x44\x78\x89\x05\x14\xea\x93\xc2\xa4\x7a\xcf\x9e\xc1\x18\x1b\xfe\xbf\x01\x01\x65\xa8\x17\x10\xb6\xec\x05\x11\x4d\xc2\xde\x5c\x89\x52\x2f\xa0\xcb\xb3\xd5\x2b\x18\xc5\x0b\xf8\xea\xd9\x1d\x26\xe1\x1b\x30\x46\x6c\x85\x03\xf4\x01\xc6\x31\x26\xf3\x67\x4b\x24\x60\x08\x05\x7c\xf3\x0c\x00\x02\x97\xe8\x8d\x12\x88\x44\x54\xf4\x66\x9e\xe2\x31\x0c\xe4\xa3\x84\x2d\x5f\xf2\x35\x17\x68\xf9\x4c\xce\x2d\xeb\x64\xa0\x3b\x79\x06\xc0\xca\x0e\x71\xf5\x6a\x8a\x04\x7c\xf5\x0c\x00\xae\x87\xf0\x9e\x72\x71\xad\x9e\x7e\x6e\xdf\xa9\x5f\x04\x63\xcc\xe5\xcb\x9e\x3f\x03\x80\x21\x4e\x13\x16\x20\x2e\xfb\x06\xe0\xa5\xe9\x5f\x3f\x38\x31\xed\x26\x30\x0c\x25\x1b\x9e\x69\x11\xd4\x13\x35\x43\xe8\x7b\x5f\xd9\xe9\x9a\xe9\xbf\x31\x1f\xdb\x61\xcb\xff\xa7\x1f\x45\x70\x8a\x22\xfe\xc6\xfc\x6b\x3e\xb6\xe3\x19\x9e\x67\x6d\x05\x64\x73\x24\xde\x61\x14\x85\x5e\x17\x38\xbc\x45\xcb\x38\x82\x42\x4e\x31\x66\xf4\x9f\x28\x10\xfc\xec\xeb\x57\xf3\xeb\xb7\x6f\x67\x0c\xcd\x25\x7b\xcf\xbe\x7e\xd5\xbf\x7d\xfb\x76\x66\xa6\x82\xe4\x87\xb2\xaf\x6f\xdf\x9e\x17\xba\x1b\x40\xf2\x16\x7d\xe2\x28\xbc\xa5\x1f\xa0\x08\x16\x37\x66\x50\x9a\x9a\x82\x25\x28\x37\xda\xfe\x0a\xe2\x48\xae\xa8\x21\xe9\x73\x8e\xc4\x90\xac\x10\x11\x94\xad\xbd\xa7\x23\x1a\x28\x71\x83\x11\x16\xeb\x37\x40\x0f\x09\x46\xb9\xbe\x6e\xd0\x0c\x31\x44\x52\x96\x68\xb6\xdc\xa1\xf5\x1b\x40\x90\xb8\xa7\xec\xee\x06\xcd\xd2\x6f\x00\x10\x3
3\x4b\x19\xfd\xad\xf3\x55\x88\x78\xc0\x70\x2c\x94\x7c\xfc\xff\x97\xce\x37\x00\xdc\x2e\x90\x6d\xa1\x56\xfd\x02\x07\x0b\xa9\x01\x18\x92\xe2\xa3\xd7\x97\xa1\x55\x0f\x0c\x67\x60\x1e\xd1\x29\x8c\x4e\xdd\xcf\xbd\xfe\x96\x09\x17\x60\xaa\x95\x1c\x26\xea\xb1\x9b\x77\x83\x57\x7f\x7b\xf5\x9f\x60\x38\x02\x4a\xa0\x7b\xde\x4b\x03\x48\xa4\xde\x9c\x22\x10\xa2\x08\x09\x14\x7a\xdd\x61\xa5\x01\x19\x02\x50\xfe\x47\xd6\x76\x5c\xa1\xec\x8d\x41\x32\x47\x52\xa9\xcd\x10\x63\x72\x71\x4b\xcd\x25\x64\xf7\x4a\xff\xa2\x28\x94\xbd\x7b\xfd\x51\x12\xad\xe5\xbb\x12\xa9\xf0\x95\x22\x1e\x5e\xdf\x5e\xdc\x5c\xf7\xaf\x80\x58\xc7\x8e\x6e\xfe\x3c\x1a\x4c\x46\x17\x17\x37\xc3\xeb\x4b\xa5\x27\xdc\x4e\x86\xa3\xf1\xc5\x60\xa2\x1a\x0e\x3e\x5e\x5f\x5f\x0c\x6e\x41\x9c\xb0\x98\x72\xc4\x7b\xce\x83\xf3\xd5\xdd\x1b\xaf\x9d\xb7\x60\xae\x0b\x6c\x2a\x5d\xc4\x4e\x77\x8c\x26\x71\xbd\xde\x28\x5d\x2d\x1c\x45\xb3\x49\x84\xc9\x5d\x5e\x94\x78\x32\xdd\x2c\x4d\xd9\x03\x2d\x04\x2a\x6b\xd4\x48\xa6\x20\x91\xdc\x34\x9f\x00\xec\x0b\x94\x51\xe6\x28\x3c\x05\x58\x94\x89\x57\xf6\xb2\x17\x3c\x95\x8a\x1e\x70\xa4\xc0\xeb\x2f\x80\xa4\x99\x14\x5c\x0e\x2e\x26\x17\xd7\xe7\xa3\x8f\xc3\xeb\xdb\xb3\xf3\xeb\xf1\xe4\xe6\x62\xfc\xf1\xea\xf3\xc5\x8d\xd7\x5d\x7b\xc6\x8f\xcb\x28\xfa\x80\xbc\x0f\x28\x91\x36\x06\x62\x9e\x62\x91\xf3\x7e\x03\x8c\xca\x2c\x13\x04\xf7\xab\x8a\xed\x61\x0a\x83\x3b\x44\xc2\xc9\x34\x09\xee\x90\x28\xdb\x25\xde\xea\x27\xde\xba\x0f\x34\xdf\x2b\x76\xbb\x29\x68\x4d\x76\x36\x75\x87\x74\x80\xdd\xa0\x5a\xdf\x9b\x25\xaa\xa9\xe9\x2e\xcf\x94\x27\xfa\xab\x89\xb7\x9d\x56\xaf\xcd\xf4\x0d\x72\x2d\xca\x95\xa3\x9b\x67\x92\x9b\x93\x5b\x63\xc8\x08\xca\xe0\x1c\x79\x2c\xab\x91\x57\x23\xad\x5c\xb7\xdc\x28\xad\x3b\x10\x46\x00\x30\x5c\x0e\x28\x99\x61\x47\x76\x62\x1a\xe1\x60\x7d\x5d\x2f\xa5\x13\x0c\x97\x13\xfd\x74\xae\xf1\x07\xb4\x9c\x22\xd6\xb4\x8b\xa5\x7a\x3a\xed\x82\x59\x5a\xeb\x01\x3b\x73\x28\xb1\x80\x80\x99\xb0\xf7\x21\x4f\xe2\x98\x32\xc1\x07\x94\x84\x58\x59\xa7\x6f\xc0\x0c\x46\x1c\x35\x59\x81\xc6\xf4\xdb\xb0\x04\xc7\xde\x13\xfb\x59\x83\x39\x43\x47\x2
f\xb8\x2d\xd7\xa7\x19\x6f\xa7\x16\xa8\x12\x55\x2b\x92\x0b\x04\x23\xb1\x98\x04\x0b\x14\xdc\xf1\x16\x7b\xa5\x6e\x07\x54\xbb\xcc\x0c\x57\x67\x19\xf7\x2b\x65\xde\x2c\x72\x7b\x63\x29\x4b\x7b\x60\x90\x30\x86\x88\x88\xd6\x00\x0a\xb0\xa4\x5c\x00\x4a\x72\x2f\x92\xbb\xe0\x14\x55\x6d\xb4\x90\x84\x00\xfa\x0d\x70\x76\x78\xec\xe5\x04\x97\xfb\x9b\x9d\x51\x5b\xba\xf5\x40\x36\xf6\x4d\x0b\x50\xb6\x43\x16\xc4\xf4\x7d\xd6\xbc\xf0\xe0\xc6\xad\x12\xb4\xdf\x2e\x41\xbd\xb9\xe4\x4e\x4c\x88\xf8\xfd\x77\x4e\xee\xf6\x76\xd4\xb1\x09\x3a\x92\x6c\x56\x5c\x4f\xbd\xa4\xa1\x24\x7b\x7b\x0b\xb4\xf3\x1c\x12\x2e\x20\x09\xd0\xa5\xec\x49\x1e\xde\x7d\xdb\xf7\x82\x84\x31\xc5\x44\x5c\xe6\x5e\x94\xad\xb9\x1e\x18\x12\x10\x40\x8e\x00\x9d\x01\x6c\x7a\xd3\xb3\xd7\x5e\x96\x10\xcd\x30\x41\x5c\x6d\x67\x11\x96\x92\xee\x73\xc3\xb6\x91\x4f\x40\x01\x8c\xfd\xc9\xe0\x6c\x86\x83\x1e\xd0\x2a\x1e\xac\x30\x13\x09\x8c\xc0\x12\x06\x0b\x4c\x90\xd3\x68\xc6\x72\x74\x44\x30\x58\xe4\x07\xa2\x6c\xd2\x08\xaf\x64\x43\x6d\x91\xc2\x25\x02\xff\x96\x6b\x0e\xea\x91\xd9\x06\x45\x9b\x0e\x60\x21\x79\xd1\x03\xd7\x14\x88\x7b\x6a\x69\xcf\x81\x72\x02\x99\xbf\xec\x49\x5e\x1f\x82\xa2\x88\xde\x2b\x2f\x8d\xd7\x5b\xc2\xcd\x7b\x2d\xcd\x81\x26\x7a\x4a\xc9\x67\xde\xe3\xef\x28\x03\x86\x0b\xc0\xb2\x41\xb7\xe0\x3e\x61\x0d\x51\x01\x32\x0f\xf1\x1e\xe8\x47\x91\x4f\x13\xfb\x95\x7c\xae\xbc\xd3\xd4\x6e\x5f\x50\x6e\x7c\x43\x29\x91\xbd\xbe\xd4\x96\x81\xc2\x4a\x4a\x96\x77\x9f\x9b\xdc\x5b\x9f\x6c\xdc\x9e\x31\x97\xf8\x4b\x9e\x3c\x52\xd3\xe5\xfb\xf4\xfa\xd2\x8f\x59\xb6\xe4\x5e\xf4\xf7\x05\xd2\xe3\x7c\x11\x51\x18\x4e\xa6\x30\x82\x24\xc0\x64\x3e\xe1\xc1\x02\x2d\xd1\x0b\xa9\x35\xed\xc9\xe2\x54\x9f\x3a\x7c\xd9\xf1\x27\x2f\xd9\x6b\xb6\xfd\xa6\x5a\x16\xbb\x2b\x6c\x1b\x55\xe4\x2d\xd1\x2e\x28\x22\x50\xf0\x73\x78\x6a\x62\x9b\x39\xd6\xaa\x9b\x03\x4d\xd5\xd1\xb9\x1c\x05\x09\xc3\x62\x9d\x37\x45\x1b\x9c\xb5\x4d\x4b\x63\xb5\x02\xc8\x39\x0d\xb0\x5a\x44\xc6\x99\x81\x79\x5e\x91\xb8\xb2\xa5\xdd\x00\xa6\x93\x91\xea\xc3\xa7\x71\xcd\x69\xd6\x6b\x79\x38\x6f\x46\x91\x92\x1c\x09\x81\xc9\x9c\xf
7\x82\x08\x23\x22\x26\x22\xe2\xed\xa8\x3b\x50\xed\x6e\x23\xae\xe7\x26\x17\x33\x4c\xf5\xa9\xd9\x4f\x8c\xcd\xc4\xc1\x82\xde\x03\x08\xf4\xab\x00\x5f\xd0\x24\xe7\x76\x80\x89\x58\x20\x22\xb0\x54\x6f\xf2\xd4\x43\x50\xa0\x5d\xce\xc2\x51\xfa\x74\x06\x60\xca\x23\xed\xc0\x48\xdf\xa7\x37\x0a\xaf\xcf\x90\x22\xae\x2e\x1d\xe0\x6c\x86\x02\x21\xbb\x9d\xe1\x79\xc2\x94\xd1\x0d\x12\xed\xf2\xc7\x42\x0d\x5c\x08\x18\x2c\xb4\x73\x3f\xdd\x57\x7c\xf3\xcf\xec\x31\xd9\xd6\xfb\x1f\xfa\x50\x35\xd0\x83\xa5\x4c\x2b\x30\x7b\x2e\x01\x7f\xa2\x2f\x02\x31\x02\xa3\x3f\x73\x5b\xb7\x35\x93\x8d\x3f\xde\x7a\x61\x7a\xff\x91\x97\x3a\x4d\xad\xdb\xab\x71\x0b\xb1\x33\x6b\xd9\x8a\xdd\xc0\xef\xa2\xb5\xfc\x19\x15\x63\x85\xa6\x46\x0e\x1d\x31\xc3\x30\xee\x51\xc9\xd4\xd7\x13\x23\x5f\x38\x6c\x28\x57\x1f\x25\x19\x33\xea\x19\x4f\x25\xe6\x99\xee\x97\x5c\xb2\xcc\xd4\xee\xb2\x94\xaa\xb9\x1d\xe8\x63\x5f\x0e\xc1\x88\x2a\x18\x9e\x2b\x9a\x0f\xfb\xa3\xc2\x0a\xd7\x63\xd5\xcf\x0d\xc3\x46\xa4\x1e\xf6\x47\xc3\x50\xca\xac\x58\xf7\xef\x21\x43\x23\x46\xbf\x18\x82\xb7\xa6\xb3\x24\x57\xab\x35\xee\x1c\x23\x77\xe2\x23\x58\x26\x42\x9e\xf4\xde\x26\xe2\x13\x61\x08\x86\xf2\x0f\xf5\xa0\xd7\x69\x91\xab\x1c\x05\x0c\x6d\xf4\x7a\x85\x98\xdf\x95\x1d\xb4\xcf\xb3\xcf\x3b\x76\x1d\x22\x0d\x2a\xf9\x89\xfc\xf7\xdb\xb7\x33\x39\x81\xc3\x5f\x83\xfc\xdb\xb9\x03\x69\xe7\xd0\x91\xe3\xdf\xca\x8d\x93\x36\x7c\x60\xe7\x4d\x63\x9f\x42\x51\xba\xd5\x5a\x36\x1f\xfb\x8b\xb8\xce\x4e\x30\x8d\xf4\xbe\x25\xbc\xad\x65\x8a\x22\x4a\xe6\x72\x2b\x6a\xe0\xc5\x1e\x15\xc6\xd4\x4c\x03\xd8\xb7\x2d\x21\x81\x73\xc4\x5a\x68\xda\x25\x9c\xa3\x16\x13\x55\xcf\xab\x93\x5b\x76\xf3\x80\x09\x16\x18\x46\xf8\xdf\xe6\x72\x5e\x72\xbe\xa0\x22\x55\xc3\xdc\x35\x48\x9d\xad\xba\xd9\x38\x1a\xe6\x86\xbe\x4b\x9b\xc8\x35\x79\x08\x8c\xf9\x82\x8a\x36\x56\xa3\xe6\xbc\x6d\xa9\x6f\x42\xe4\xae\xc3\x90\x34\x50\xaa\x89\x64\x5b\xec\x94\x4e\xe3\xe2\x04\xf6\x44\x2a\xb5\xe4\x11\x09\xd8\x5a\xd1\x66\x72\x87\xd6\xbd\xbb\x25\x97\xff\x4e\xca\xc6\x5d\x47\xc7\xac\x2b\x49\x9e\x94\x8c\xe6\x63\xb5\x75\x2b\x32\x82\x7f\xd0\x84\xd
9\x35\xf8\x82\x3b\x17\xf7\xd9\xcf\x05\x99\x63\x82\xc0\x58\x5d\xf4\x67\x67\xfe\x20\xa0\x49\x6e\xc7\xfd\xe9\x85\xf9\xf6\xe5\xd7\xaf\xa3\x9b\x8f\xff\xfb\x62\x70\x3b\xb9\xfe\xf4\xe1\xed\xc5\xcd\xb7\x6f\xff\xcb\xd0\xc4\x04\x0c\xf4\x30\x5c\xf6\xe6\xe6\x71\xd3\x97\x24\xce\x8b\x9f\xbd\x1e\xd5\xf9\x7c\x01\x57\x08\xbc\x60\x34\x42\xfc\x4c\x11\xf3\x6e\xc9\x7b\x6a\x22\xf4\x0f\xb4\xbe\xd0\x73\x42\xec\x1c\x99\x5f\x5e\xc8\xa9\xda\x80\x17\xaf\xbb\x19\x82\x22\x61\xa8\x07\xc6\xc8\x9f\xe3\x42\x88\x98\xbf\x39\x3b\x2b\x84\x4f\x98\x51\x9f\x85\x34\xe0\x66\x47\x0a\x12\x2e\xe8\x12\xb1\x97\x5a\x65\x84\x2f\x33\x62\x9f\x98\x5f\x27\x70\x42\xd0\xfd\x24\x96\xa2\xc2\x85\xdc\xaf\x15\x83\xe5\xc1\x67\xb2\xa6\x09\x9b\xd0\x7b\xc5\x63\x9e\x17\xe3\xbb\x25\xff\x03\xad\x77\x23\xc4\x7f\x7c\x18\x0f\x2c\x91\x5a\x4b\xb0\x22\xf1\x6e\xa4\x57\xb1\x78\x52\x94\x97\x4d\x32\x6c\xcf\x90\x9e\xac\x69\x31\xb6\xd1\x34\x8e\x88\x33\xf4\xaf\x04\x71\x91\x7e\x35\xc7\x2b\x44\xe4\xfc\x25\x51\x7b\xb9\x8e\x87\x33\x00\xa7\x1c\x11\xa1\x83\x05\x8c\xc4\xa7\x52\x6e\x5e\xd9\x9f\xab\x03\x53\x6e\x00\x98\xab\x31\x14\xb4\x8f\x66\x9b\x69\xdb\xd7\x0f\x37\xb4\x65\x3f\xf8\xad\xb6\xb0\x60\x97\xed\x2c\x58\xb4\x84\x38\x2a\x55\xd7\x4a\xf7\x4e\xd4\x76\x73\xd4\x45\x47\x5d\xf4\x38\x75\x51\x33\x29\xde\x46\x27\xed\x49\x23\x1d\x46\x1f\x6d\x50\x09\x07\x55\x56\x45\x4e\x5a\xcb\xee\xa8\x92\x8e\x2a\xe9\x51\xab\xa4\x7a\x41\x3e\x6a\xa5\xee\x6b\x25\xeb\x2f\xd0\x5e\x1d\x8c\x9a\xc6\x54\x58\xef\x14\xb0\xed\x74\xa0\xbb\x56\x3c\xe9\xb9\x56\xb1\x06\x26\x82\x2e\xa1\xc0\x41\x76\x0e\x56\xe7\x5f\x4c\x49\x8b\x78\x3e\xfb\xc2\x43\xde\x80\xec\xc4\x45\xba\xc0\x88\x41\x16\x2c\x70\x00\xa3\x0a\xc7\x54\x45\x87\x65\x5e\xa9\x0a\x6f\xa9\x75\x7b\x4f\x56\x31\x99\xcc\xa1\x40\xf7\xd0\xd2\xcc\xa3\xea\x85\x79\xee\xf3\xe8\xfa\xd2\x7b\xea\xc0\xe1\x82\x76\xfc\x9f\x63\x62\xc6\x75\x00\xd7\xe9\xfe\x42\x38\x67\x98\xa1\x7b\x98\x5e\xf0\x7b\x2c\x79\xe7\x7f\x77\x60\x46\xd8\x91\x76\x37\x22\x6c\xfb\x40\x7c\x41\xcd\x85\x9d\xbd\x3e\xd3\x53\xcd\x39\x68\xab\x73\x01\x3a\x1e\x79\x5e\xd8\xb0\xfd\x0d\xb9\xa9\xa
6\x1f\xce\x52\x37\xa6\xbf\x31\x72\x1d\x49\x91\xc5\xb3\xc9\x9d\x35\x25\xe2\x3d\x8e\x22\xb5\x27\xac\xd5\x85\xa6\xd7\xa5\xdc\x22\x74\x70\x10\xa0\x0c\xcf\x31\x81\x02\x93\xb9\x76\x26\xc3\x2c\x66\x45\xdf\xac\xc3\xe2\x86\x4c\x8a\xb6\x5e\x84\xb9\xe8\x81\x71\xc5\x40\xb3\xec\x87\xd4\x03\x4b\x89\x60\x34\x4a\xc7\x21\x65\xc1\xcf\x61\xb0\xa3\x78\xc1\x81\xd5\x46\x6e\xfc\xfc\x14\x05\x50\x45\x03\x95\x11\x25\x8b\x0e\xf0\xba\xd4\xf3\xc9\x26\x78\xaa\xaf\x95\xdd\xb8\xfc\x9e\x21\xf6\x8d\x4e\xba\xd0\x01\x84\x80\x23\x01\xd4\x7d\x42\x2e\x96\x10\x2e\x11\x10\x78\xa9\xe2\x76\x74\x3b\xdf\x76\xd0\x81\xff\x53\x2a\x5f\xcb\x54\x37\x25\x5c\xf2\xba\xd4\x1c\x73\x18\xa4\x2e\x32\x16\x69\xff\x2e\x09\x4c\x76\x80\x37\xe0\x8f\x37\xc5\x51\xa6\x0d\xb3\x1b\x90\x22\x9b\xf3\x4c\x96\x0c\x55\x11\x4a\x25\x7d\xf9\x93\xd4\x89\x2e\xd9\x65\x7f\x76\x61\x4f\x4c\x9e\xdd\x52\x6a\x25\x45\x06\xaf\xb3\x98\xd1\x18\x31\x81\xd3\x3b\x74\xe4\xea\x00\x4d\x8a\x5e\x39\x5d\x33\x89\xf2\x7a\x54\xd2\xa5\x19\x55\xc6\x9c\x5b\x38\x57\xc9\x75\x7a\xe9\xca\xbf\x1a\xd8\x3f\x5d\xf2\xa9\xe9\xa7\xb6\xd5\x24\xfd\x34\xcc\xad\xb0\x68\x30\x09\x71\xa0\xb5\x00\x47\x3a\xc4\x2d\x0b\x0f\xf4\xa3\xd5\xbc\x2e\x53\x55\x2e\xa5\x74\x09\xd7\x60\x09\xef\x9c\xa4\x27\x27\x00\x44\x32\xc1\xaa\x2a\x15\xf6\xa7\x03\xfc\xfe\xef\xff\xf3\x0f\x0a\x7a\x86\x95\xdc\xae\xe6\x70\xc6\x53\x40\x59\x89\xcc\x2a\x6e\xcb\xd5\x48\x4c\xb2\x65\xf9\x9b\x88\x27\x1e\x72\xd5\xfa\x7d\x55\x68\x5b\x96\x44\xc8\x58\xe0\x7a\x79\x45\x91\x43\x40\x4a\xdc\x84\xcd\x9c\x52\x32\xc4\xea\xbe\x24\xee\xd1\x1c\xa3\xec\x1e\xb2\x10\x93\xf9\x44\x12\xb2\xd4\x2a\x4b\x1f\xb9\xc9\x9e\x78\xd0\x80\x83\xcd\xf9\x8c\x5b\x65\x67\xce\xbc\x49\x75\xd7\xc4\xd3\x24\x69\x61\xe1\xe9\x06\x4e\x14\x19\x05\x0c\x05\x08\x9b\xac\x38\xb5\x1f\xc8\xad\xc1\x46\x28\xcb\x36\x86\x18\xb9\xd5\x61\xb7\x41\x1b\x59\xab\x62\xc7\x54\xf6\x1a\x8c\xe5\xfe\xc1\xb0\xba\xc8\xd5\x99\x3e\xe6\xb5\x74\x2a\x49\xde\x03\xef\x72\x8a\xc0\xc6\xa8\x4e\xc6\x17\x57\xef\x26\x1f\xfa\xd7\xfd\xcb\x8b\x73\x10\x51\x18\x82\x34\xa6\xd5\xc4\xaf\xbe\xbf\xbd\x1d\xa9\xa8\x59\xf9\xc
b\xd8\x74\x5c\x0c\x63\x5d\xc1\x08\x37\x0c\x61\xd5\x5d\x64\xe7\xbc\x6d\x22\x3c\x6f\x73\x7d\x14\x9e\x3e\x6c\x20\xab\x7e\x52\x52\x4c\xc5\x50\x6d\x3f\xc3\xb4\x8b\xce\x4e\x70\xbc\x83\x19\x8e\xbb\x3b\xc5\xdb\xc1\xf7\xb2\xd0\xf6\xd0\xc9\xe9\x8d\xc7\x57\xdf\x39\x3d\xdb\x43\x27\xa7\x77\x79\x33\x1a\x7c\xe7\xfc\xd2\x2e\xba\x30\x41\x37\x4a\x2a\x9e\x14\xd3\xf1\xeb\x76\x23\xe7\xc8\x94\x85\x85\x65\x7b\xaf\x36\xde\x30\xd7\x56\x31\x99\x4b\x73\x6d\x8a\x16\x30\x9a\x01\x3a\xcb\x05\xa2\xf6\x2d\x9e\x82\x52\xff\x0c\x71\xc1\x70\x20\x6d\xe3\x29\xe4\x3a\xe1\x43\x64\x5b\x99\xed\xfb\x05\x57\x9b\x8c\xd7\x51\xba\xe1\x00\x9d\x44\x01\x7e\xba\xf8\x3f\x26\x37\x9b\xb2\x74\xa7\xfa\x59\xed\x41\x3c\xa0\x31\x02\x3f\x69\x8f\x50\xde\xba\xb5\x76\xc8\xcf\x55\x49\x1b\xfe\xfe\x66\x5f\x87\x39\xb0\x2f\x3c\x55\xa7\x2f\xd3\x7b\x36\x74\xff\x2d\xd2\x3e\xf1\xe0\x10\xd2\x1d\x19\xda\xa6\xc3\x91\x4e\xa9\x93\xbd\xd9\x51\xb5\xed\xaf\x90\x5f\xa4\x3b\xb2\x79\x31\x1b\x7a\x53\xb6\x7d\x16\x86\x2d\x27\x88\x96\xb1\x58\xcb\x31\x01\x14\xcb\x49\x33\x35\xc8\xd5\x5f\xd2\x57\x2a\x87\x47\xe9\xa1\x3e\x4f\xf2\x8c\xcc\xda\xb1\x32\x55\x3e\x06\x3c\x27\x28\xec\x81\x7e\xde\x18\xd4\x3f\x79\x09\x4b\x43\xca\xd5\x18\xa4\x99\x21\x9b\x16\xe8\x0e\x1c\x13\xd8\x8c\xc8\x36\x34\x27\x17\xdd\x5e\xfd\xfb\x5b\x6b\xae\x67\x89\x3a\x8a\x58\x2e\x4e\x00\x24\xe0\xe6\xdd\xc0\xeb\xcf\x22\x5a\x64\x5e\x97\x88\x92\xb9\x01\xa1\x10\x99\x2f\xef\x4c\xc3\x13\x64\xe1\xdc\xe9\x85\xd1\x26\x92\xf4\xc0\xdb\x35\x08\xd1\x0c\x26\x91\x38\xd5\x20\x18\x35\xfc\xcb\x79\x88\x8a\x6e\xa1\x94\x3b\xf6\xd2\x03\x46\xd1\x5a\x1d\x36\xf5\x19\xd6\xb2\x1c\x0c\x47\xbe\x08\x41\x32\x47\x16\x89\xc8\x4c\x86\x32\xf7\x24\xeb\xcf\xab\xa8\x41\x1a\x66\x32\xe5\x53\x72\xfe\xc9\x29\x19\x4b\xee\xc0\x5b\x75\x92\x92\x1a\x25\x27\xde\xa6\xa9\x99\xe2\x36\x4a\xbd\x5f\x02\x61\x02\x1e\x42\x99\xe7\x95\xf5\x26\x68\x97\xcd\x6a\xdc\x15\x0c\x42\x9d\x4b\xc9\xd4\x39\xe8\x8b\xbc\x32\xff\x53\x11\xf1\x7d\x95\x39\x8b\xdf\x95\x3a\x95\x09\xa0\x52\x5d\x84\xeb\xa9\x5e\x18\x97\x43\x85\x1e\xd7\x40\x2d\x3a\x1f\xc6\xac\x11\xb9\x40\x2a\x4
5\xc5\xea\x2a\x7f\x71\xb8\xd3\xcb\x39\x1a\xcc\x22\x49\x07\x64\xc5\x3c\x7f\x31\xba\xc1\x2b\xdf\x71\x6f\xf9\x46\x9c\x96\x16\x48\x2c\x1e\xab\x9a\xb1\x67\xb3\x8a\xf2\x45\x4f\xe9\xca\x54\xf6\x52\x0d\x54\xbd\xbf\xe7\x14\xf4\x70\xe6\x09\x96\xe3\x12\xd3\xc9\xb0\x89\xa0\x56\xfd\x2c\x69\x88\x7c\xe1\xf4\x47\x4a\x63\xed\x8e\xe8\x81\xf7\xf4\x1e\xad\x10\x33\x2a\xd4\x41\x3a\x52\x7d\xea\x08\x0a\xbf\x57\x98\xdb\x60\x52\xf2\xd9\x5d\x3d\x1d\x58\xa9\x74\x95\xc2\xe5\x1c\x18\x0f\xc6\xe2\x88\x78\x28\x01\x15\x70\x3f\xe5\x00\x12\x75\x92\xd6\x2f\x47\x22\x68\xe0\xdd\xc0\x05\xbc\x9f\x44\x9b\x8b\xd1\xba\x28\x47\xae\xec\x34\xa5\x6e\x29\xdc\xc5\xae\x29\x7c\x30\xc4\x1d\x6d\x23\x3d\x5a\x5c\x36\x73\x95\xdb\x1d\x2c\x36\x0f\xa2\xe4\x88\xc4\xe6\x76\x77\x44\x62\xcb\xba\x7b\x4c\xba\xe1\x31\x5e\x21\x6c\x87\x14\xf4\x58\x2e\x0d\x8e\xae\x9a\xa3\xab\xe6\xe8\xaa\x39\xba\x6a\xd4\xcf\x63\x74\xd5\x3c\x05\x7f\x0b\xd8\xde\xf3\x74\xbc\xff\xdd\xc3\xfd\xef\xf1\x72\xb4\xe1\x0c\x8f\x97\xa3\xc7\xcb\xd1\xe3\xe5\x68\x83\x09\xee\xe5\x88\x7e\x74\xb6\x37\x71\xb6\xff\x90\xe7\xed\x05\xac\xcb\x69\xe9\x4c\x2e\x4b\x49\xc4\xdd\xea\x90\xf9\x2c\x07\xf4\xb4\xa9\x85\xf1\x79\x74\x0d\x0c\xdf\x14\x58\x5a\x10\xa0\x58\x85\xfc\x5a\x23\x6b\x46\xd9\x23\x91\x6f\x07\x19\x52\x20\x66\xe2\x8c\xfb\x2a\x99\x64\x89\x0a\x69\x80\x96\x7e\x52\x70\x55\x83\x19\x0c\x10\xef\xb9\x6d\x27\x30\x6d\xdc\x90\xbc\xc3\xa5\x01\xdb\xea\xd9\xa3\x1c\xe6\xd2\x20\x4c\xd4\xd9\x2d\x66\x28\x4b\x77\x94\x84\x37\x62\xe7\x29\x93\xfc\x81\x0b\x0c\x47\x1c\x05\x36\xc3\x15\x85\x60\x20\xc9\x03\x86\xce\x38\xff\x4b\x85\x16\x5f\xcc\x6d\x9d\x98\x21\x99\x17\x0c\x7e\x87\x9d\x19\xe3\xed\xfb\xd3\xf9\xeb\x91\xcc\xa5\xe5\xcc\x68\x32\x5f\x6c\x08\x51\x76\x09\x05\x32\x42\x39\x20\x7a\xd7\x54\x80\x20\xc3\x63\xb6\xcb\x01\xc4\xc9\x34\xc2\x41\xb4\x6e\x2e\x56\xc3\x52\x86\x3e\x39\x2d\xea\xc0\x68\x97\xe9\xd0\x22\x6c\x72\x87\xb1\xcb\x1d\x0c\xec\x27\x95\x25\xb8\x10\x22\xae\xe7\x54\x29\xc8\xf5\x81\xb3\x06\x73\xe0\xdd\x4f\x8e\x2b\xbc\x11\x5b\xc6\x9d\xe4\x0b\x7f\xaa\x8c\x71\x11\xec\x2a\xf1\xe1\x1e\xf6\x6a\xb6\x1
5\xc0\xa2\xc6\x78\xd9\x06\x61\x31\x6b\xd9\x12\x62\x31\x0f\xfa\xd7\x10\x63\xb1\x9d\x4d\x19\x62\x5e\x59\xf6\x48\x67\xa6\x3a\xd8\x9e\xa0\x39\xaa\x9e\x02\x18\xc8\x81\xe9\x69\x54\x42\x7b\xc3\xe2\xa7\x76\xa9\xe2\x7f\x09\x17\x52\x96\x56\x38\x44\xd6\x47\xae\x1a\x9a\x5c\xc0\x35\x28\x71\x45\x33\x78\x7f\x8e\xf9\x5d\xcf\x22\x1e\xd8\x47\xa7\x89\x50\xc7\x37\x95\x5f\x99\x0d\x04\x12\x3d\x8c\xe6\x36\xc7\xb9\x3f\xff\x03\xe7\x09\xe7\x85\x42\x47\x46\x69\xa1\x2f\x62\x37\x36\x64\x96\x66\x4c\x19\xfe\x61\x4b\x5a\xed\x13\xec\x71\x9b\xa4\xea\x22\xa6\xa2\x43\xaf\x71\x29\x8c\xe3\x2e\x50\x23\x5b\x52\x6d\xcf\xd0\x8f\x35\x84\x6b\xbc\x3f\xa9\x59\x3d\xad\x4d\xc9\xaf\x49\x51\x5a\x14\xe0\x11\x6c\x4d\x66\xa4\xdb\xed\x4e\x6e\xe3\xb6\x1b\x94\x6d\xbb\x1d\x18\x70\xbb\x8d\x4a\x3f\x70\x5e\xbd\x5d\x59\x78\x77\xb5\x61\x99\xed\xe0\x91\x28\xf9\x6a\xbc\xa9\xf2\xc9\x6d\x82\x23\x3b\x0c\x02\x55\xe3\x18\xde\x3a\x2e\x4e\x29\x15\x8f\x95\x83\xfe\x36\x0c\x4e\x40\xcc\xd0\x0c\x7f\xd1\x61\x5c\xcf\xf5\xb7\xcf\x75\x08\x04\x14\x2f\x74\xbd\x04\x5d\x9a\xc0\x44\x4a\xa8\xd2\xbb\xd1\x1a\x93\x39\xe8\x8f\x86\x9b\xc9\x93\xc1\x39\x4f\x62\xc8\xe0\x92\xf7\xf2\xa6\x41\xa7\xb7\xea\x7a\x91\xcf\xa6\xfa\x78\xc5\xbd\x91\xc7\x36\xf3\x3a\xf6\x9e\x56\x60\xfe\x86\x89\x76\x31\x46\xbc\x31\x53\xa1\x18\xc6\x8d\x67\x0a\x83\x00\x71\x3e\xd1\x81\x27\x3d\x02\xc5\xc4\x8b\xb5\xd8\x3c\xe7\xb2\x50\x91\xbd\x4d\xb8\x98\x8e\x93\xd9\xd3\x72\xef\x49\x22\x4c\xe6\x3d\x42\x43\x34\x81\xb3\x99\xd2\x3f\x1e\x22\x5d\x75\x78\x83\x72\x7c\x7b\x5f\x14\x03\x4c\x74\x5c\x46\x41\xb4\x36\x01\xf8\x65\xe5\x6e\x3c\xc4\x93\x5e\x87\xc0\xfc\x9a\x62\xb7\x6c\x42\xfa\xeb\x3e\xf4\x5e\x5e\xdf\xb5\x58\x1e\xe9\x27\x8f\x62\x51\xb4\xae\x2b\x92\xc2\xbc\x3c\xfc\xb1\x29\xc4\x0c\x05\x02\xaf\x7c\x5b\x5a\x21\xeb\x4c\xb8\xa0\xea\x18\x33\x99\x51\x36\x49\xe2\x10\x8a\x87\x3b\x6c\x4d\x66\x8c\x2e\x27\xc2\x4c\xfe\x78\xf4\x3a\x1e\xbd\x0e\x6e\xb8\x1f\x8f\x5e\xc7\xa3\xd7\xf1\xe8\xf5\xb4\x8e\x5e\x56\x43\xda\x5d\x76\xf3\x25\x47\xaa\x4f\x73\xfb\x52\x03\x66\xe6\xde\xd3\x0d\x0b\xec\x78\xe2\x3c\x9e\x38\x8f\x27\x4e\xe
0\xfd\xfc\xf0\x27\xce\x1d\x9e\x5e\x4e\xc0\xbd\x8a\xb0\x25\x68\x85\x98\xae\x0a\x01\xc1\xa7\x9b\x2b\x03\x9a\xab\x73\x1f\x80\x40\x5c\xa8\xd2\xe5\x2d\xcf\x3c\x65\x65\x02\x1f\xf9\xa1\xc7\x2d\x24\x5f\x5b\x7c\x7a\x3f\x81\x28\xd5\x96\x7e\xf5\xde\x51\x19\x5d\x9a\x93\xf0\x9a\x3d\x62\x83\x68\xb7\xd2\x1f\x1b\xd5\xa5\x9b\x5e\x5a\x28\xac\xbe\x69\xbc\xc3\x62\x69\xfa\x07\x19\x70\x51\xec\xaa\x84\xae\xa2\x5a\xc1\xd6\x0b\x5a\x57\xb9\x7f\x68\x9f\x44\xdd\x1a\x69\xe2\x13\xc8\x99\x59\x0f\xea\x1b\xd8\xc5\x51\xb9\x63\xe7\xab\xd2\xe3\x53\xcd\xb8\x0b\x31\x28\x8f\xfc\xb4\xa3\xa6\xd6\xb6\x28\x66\xb7\x0f\x3f\xc7\x53\xc0\xf1\x14\xf0\x88\x4f\x01\x4f\xdd\x10\x3f\xf8\x49\xc7\xd7\x7d\x8f\xfb\xf2\xa9\x79\xfc\x5a\xce\x84\x78\x62\xa1\x6c\x9b\xd2\x95\x9a\x27\xd1\x74\x28\x1d\xaf\x3c\x7b\xab\x73\xa9\x79\xd9\x1d\x2e\x4d\x04\x72\x41\x05\xea\x42\x49\x75\x02\xd7\x8d\x6a\x06\x04\x4d\x91\xa0\x66\x94\x81\x70\x4d\xe0\x12\x07\xaa\x53\x95\x43\xab\xf2\x6e\xf5\x2b\x54\xa0\xb4\xd7\xdb\xb4\x1a\x6c\x05\xf3\xcd\x7c\x57\xf8\x0a\x5e\x67\x1b\x1f\x37\xa5\x83\x3c\x54\x0e\x9b\xfc\x95\x0f\xcd\xf6\x32\xc3\x7c\x9c\x11\xf0\x3f\xec\x20\x0d\x34\x97\x81\xfa\xca\x51\xc5\xef\x8e\x3b\xf0\x1d\x85\x22\x50\x9a\x34\x6d\x6a\x40\x95\xbc\xe2\x30\xd1\xca\x38\xe6\x28\x98\xd8\x04\xe9\x49\x0a\x05\xd7\x50\x94\x9c\x5c\xc3\xdb\x0c\x87\x07\x71\x5b\x1d\x68\x85\xc0\x14\x21\x92\x81\x95\x95\x21\xba\x54\x64\xf1\xf5\xc0\xa7\x22\x16\x61\x49\xb2\x9f\x7c\x95\xd7\xdf\xc2\x20\xfd\x38\x15\x21\x35\x24\xa5\x94\x4a\x05\x59\x96\x43\xbc\xf1\x87\x6e\x11\x3d\x6e\xde\x0d\x0a\xd8\x35\x1a\x68\xcd\x24\x56\xea\x62\x85\x64\x0e\xde\xf7\xbd\xee\xdc\x74\x56\xba\x42\xba\x80\x50\xc5\xe8\x53\x68\x4c\x67\x42\xb9\x8c\x70\x07\x37\xc6\x14\x34\xd5\x20\x3b\xf9\xc1\xa9\x94\x4e\x92\xe6\x75\x9a\x11\xbc\xe0\x79\x98\x9a\x02\xcc\x4d\x19\xb0\x8d\x3f\xe3\x8a\xf4\xcb\x05\x62\x28\x47\xca\x77\x2a\x17\x1f\x2e\xe3\x08\xa5\x53\x7b\xdf\xcf\x0d\x28\x4b\xf5\xfc\x45\xa5\xa4\x42\xcc\xb2\xfa\x90\x4d\x44\xc3\x4c\xb5\x9c\x0e\x69\xcd\xa6\xcf\xa3\x6b\x7f\x11\x15\x32\x4d\x7f\x69\x48\x84\xf2\xe9\xa7\x69\xac\x55\x0
2\x5c\x82\xb0\x9a\x07\x30\xc8\x70\x56\xeb\x44\x5c\x8a\xb5\xd7\x5f\xb5\x78\x9f\x02\xb9\x23\x31\x20\x55\x5c\x4e\x50\x7d\xe1\xf4\x21\x01\x37\xcd\xe5\xb4\x9c\x93\x39\xd4\xa4\xfc\xc6\x90\xa3\x6a\x8a\xe8\x55\x56\x37\x2d\xa6\x34\x6a\x9e\xb7\xf0\xa0\xa7\x84\x87\x4b\xbf\xf5\x4f\x7b\x1b\xce\xbd\x07\xce\x17\x34\xe3\x7c\x52\x66\xac\x3d\xc7\x22\x12\xc6\x14\x13\x51\xed\x37\x37\xac\xb8\x30\x0f\xee\xdf\x7d\x9e\xcb\x86\xfe\x77\xab\xfa\x4e\x39\x3f\x2c\x29\x19\x7c\x77\x0d\xdb\xef\x2a\xe2\xa9\x0d\x3b\x18\x45\xe9\x87\x96\xb7\x69\xec\xc5\xf5\xc5\xa5\x81\x6a\x51\xe6\x86\xaf\x50\x9e\x1b\xc0\x95\xe7\x56\x7c\x32\x80\xd8\x19\x48\x48\x29\x14\xf5\x13\xa8\x0a\x5a\xe6\x74\xda\x44\xf5\x8f\x06\xef\xdb\x43\x3b\x6f\x4b\xfd\x3c\x09\xab\x1c\x6c\xdd\x76\x98\xed\x5f\x3b\xc5\x08\x39\x68\x78\x65\x22\x34\xf2\x9e\x78\x80\x8d\xe2\xeb\x57\x33\xb8\x6f\xdf\x4a\x2e\x6b\x79\x32\x7d\xb9\x44\x62\x41\x43\x8d\x91\x67\x9e\x3d\x55\xd5\x38\xc9\x0b\xe1\xdc\xe0\xb6\xd4\x3d\x2d\xaf\x6a\xcb\x1e\x6f\xad\x7d\x1e\xe9\xf2\x06\x20\x86\x0c\x11\xe1\xa9\x60\x6f\xaa\x52\xae\x26\x15\xf3\x95\xdf\x5d\x3f\xb2\x39\xf3\x3b\x1c\x0f\x97\x31\x65\xa2\xfe\x16\x50\xf9\x3e\xab\x77\x7b\x1a\x1e\xf4\x86\x3c\x73\xc2\xd3\xb0\x2e\xa4\x4c\xcd\xa4\x24\x8e\xac\x76\xd3\xa4\x21\x02\xb6\x5d\xa6\xbc\xd5\x89\x40\x7d\xa7\xa8\x63\xeb\xf4\xf6\xda\x08\x5e\x8d\x8c\x38\x73\xda\x8b\xa0\xb4\xb7\x8f\x2c\xbb\xbb\x61\xdc\x56\xee\x1e\xad\x36\x8f\x12\xb1\xa8\x63\xc3\x36\xd7\xdb\xee\x25\xc2\x7a\xb2\xc7\xca\x05\x25\x2e\x5b\x77\x75\x74\x83\x75\x3b\xda\xf9\xcd\x23\x13\xcb\x90\x32\xfe\x8d\xf4\x33\x1f\xfc\x47\x72\xbb\x74\x4a\xbe\x6c\x73\x16\x94\x82\x05\x64\xa1\x06\xc2\x85\x9c\x12\x00\xa7\x34\x11\x60\x8d\x8a\x81\x1e\x7b\xdb\x8d\xf7\x47\x3b\x2d\x1d\x93\xf2\xb2\x2a\xf5\x85\x4a\x1e\xe4\x34\xb7\x8b\x82\xbd\x7e\x71\x99\xee\x1e\xe7\x5c\x74\xa4\x36\xf0\xfb\xba\x1d\x50\xed\xd2\x77\x69\x4f\x9f\xfb\x95\xc2\x0c\xcc\xfb\x10\x4b\x19\xdc\x03\x83\x0c\x15\x4e\x80\x25\xe5\x02\x50\x92\x7b\x91\x2e\xb2\xef\x75\xe6\x20\x72\x42\x12\x02\xe8\x37\xc0\x1c\x30\xf4\xaf\x04\xe7\x2e\x09\xaa\x6f\x50\x1d\x84\xb
2\x6d\x60\x5f\x8b\x38\x52\xd9\xcf\x61\xeb\x61\xee\xe0\xdc\xae\x8c\x0f\x23\xd8\x69\x61\x78\x6b\x81\x80\x26\xb5\x4d\x32\xe7\xea\x7d\x2d\xa6\x3d\x47\x02\x08\xea\x75\x63\x61\xb9\x9f\xd6\xf1\xde\x50\xb4\xe7\xda\xbb\x75\xfc\x49\x57\xb5\x2a\x21\x5f\x1a\x3e\x0a\x28\x6b\xe0\x1e\xd3\x3f\x19\x3c\xe3\x90\xa8\x18\x5d\xb7\xac\xbf\x31\x35\x15\xf7\x43\x34\xc3\xc4\xa0\xf0\x46\x58\x2e\x51\x7f\x8d\x64\x95\xec\x95\x87\xdc\x14\xea\xb1\xe0\xed\x3a\xff\x0d\xac\x30\x13\x09\x8c\xc0\x12\x06\x0b\x4c\x90\xd3\x68\xc6\x72\x12\x8e\x60\xb0\xc8\x0f\xa4\xbc\xa6\x84\x34\x16\x6d\x45\x09\x5c\x8c\xe1\x34\xfc\x03\x58\x48\x5e\xf4\xc0\x35\x05\xe2\x9e\x5a\xda\xeb\x8a\x6a\x05\xd9\x56\x95\x7d\xa2\x88\xde\xab\xcb\x10\xaf\xb7\x84\x9b\xf7\x5a\x9a\x03\x4d\xf4\x94\x92\xc5\x5b\x18\xc3\x05\x60\xd9\xa0\x5b\x70\x9f\xb0\x86\xa8\x99\x67\xa6\x07\xfa\x39\x37\x7e\xe6\xb4\xa1\xb3\x8a\x4e\xd3\x2b\xb3\x05\xe5\x42\x97\x30\x29\x46\xc1\x02\xbb\xf3\xa1\xb0\x92\x92\xe5\xdd\xe7\x26\xf7\xd6\x27\x1b\xb7\x85\x93\x96\xf8\x4b\x9e\x3c\x52\x45\xe7\xfb\xf4\xfa\xd2\x8f\x59\xb6\x54\x15\xc2\x78\x21\xb5\xc6\x24\xd5\x1a\x13\xad\x35\x5e\xf8\xa5\x30\x94\xd2\xf1\x65\xa7\x88\xcb\x6f\x12\x22\x9b\x6e\x0f\x5e\xcc\xec\x36\x1b\x44\x59\x84\x77\xf6\x73\x58\xd8\xf3\x32\x47\xf4\x36\x73\xac\x55\x37\x07\x9a\xaa\x7b\xcb\x0e\xe3\x1e\x85\x89\x58\xbc\x9e\x04\x11\x46\x44\x4c\x70\xd8\x50\xf5\x7e\x94\x62\xf5\xa7\xbd\x30\xfb\x33\xbb\x3d\x4c\x45\x49\x21\x80\xd9\x3b\x62\x25\xaf\x69\x06\x6f\x4e\xa0\x3f\xf6\xe5\x10\xc0\x40\x0d\x01\x0c\xcf\x35\xb4\x6f\x7f\x54\xd8\xe0\xf4\x58\xf5\x73\xc3\xb0\xd1\x3e\x37\xec\x8f\x86\x0a\x36\x5d\xac\xfb\xf7\x90\x21\x85\x43\xaf\x7b\x68\xbd\xe9\x49\x72\xb5\xda\xf0\x1c\xe3\x7a\x07\x67\x09\x00\x4c\x50\xc3\xdb\x44\x7c\x22\x0c\xc1\x50\xfe\xa1\x1e\xf4\x3a\x2d\x72\x95\xa3\x80\xa1\x26\x07\x12\x07\xd8\xb0\x2a\xdc\xba\x63\xb5\x02\x4b\x0e\x1d\x72\x12\x87\xaf\x19\x98\x3f\x3e\xb5\x4a\xa2\x77\xd8\xb1\x55\x1e\x7d\xbe\x7d\xcb\x54\xfa\xba\x0c\xfa\x86\xa7\xaa\xa2\x5c\x6b\xb7\xad\xfe\xb8\x1d\xc6\xa0\xbd\xe9\xca\xaa\x9c\xa5\xc5\x72\x8c\xed\x0d\x04\x6d\x70\x4f\x3
f\x2a\x8c\xa9\xd9\xda\xb7\x6f\x5b\x42\x02\xe7\x88\xd5\xe6\x10\x64\xd7\x56\x45\x24\xc3\x5d\xe0\x29\xaa\x28\xd5\x3c\x65\x79\x29\x7c\xe3\x77\xba\x42\xf7\x0c\xc5\x98\x8b\xbc\xad\x4a\x38\xc8\x89\x64\xed\xa9\x4d\x9a\x70\xa6\xe4\x95\x13\x90\x72\x87\xd6\x5a\x82\xe4\x46\x25\x28\xcb\x6c\xbe\x4b\x35\x32\x13\x69\xf7\xc7\x87\x71\x81\xb6\x65\xf9\x12\x87\xc9\x78\xa8\x06\xf5\x3c\x12\xaf\x86\x78\x3b\xd9\x85\x17\x18\x31\xc8\x82\x05\x0e\x60\x54\xa1\x01\x2b\x3a\x2c\x53\x7f\x9b\x37\xe4\x8e\xe2\xde\xef\xc2\x37\xf8\x54\x51\xf0\x0d\xe7\x9a\x87\xed\xdc\xa8\x06\x0f\x1e\xbc\xb3\xdd\x6d\xc6\x4e\xa3\x74\x5c\x67\xfb\x09\xb8\xfd\x78\xfe\xf1\x0d\xf8\x69\x7a\xf6\xfa\xd7\x5f\x5f\xbd\xfe\xf5\xaf\xaf\xfe\xf3\x67\xa0\x6d\x26\xf9\x0f\x41\x81\xa0\x0c\x0c\xae\x86\xca\xff\x49\xbc\x02\x1a\xe6\xd0\x51\x4f\xd1\x5e\x73\x61\x69\x76\x2f\xef\x67\xe6\x94\xe6\x09\x2b\x5d\x34\x61\x09\xe9\xe5\xcb\xa7\x57\x2b\xe2\xdd\x1d\xaf\x9c\x50\x68\x13\x46\x9e\x90\xd4\xb5\x83\xb9\xa9\x2f\x88\x49\x66\x4f\xd1\x59\xf6\xa4\xab\xc9\x95\x69\x62\x1a\x5a\xcf\xca\xab\x97\xbf\xfd\x0a\x82\x05\x64\x30\x10\x88\x71\x20\x2d\x31\xed\x82\x96\xcb\x21\x5a\x6b\xb4\x99\x9b\x77\x83\x57\xbf\xfc\xfa\x57\xb7\xb3\x0b\x1d\x8c\xab\x73\x75\xde\x80\xe7\x2c\x21\x2f\x4d\xe7\xcf\xb3\xe7\x4a\x53\x94\x6f\x12\x52\xac\xdb\x5e\x9f\x9d\x2c\x19\xd0\x22\x33\xd9\x59\x29\x86\xc7\xb3\x84\x04\x92\x55\x1b\x98\x6c\x1f\xe9\xd9\x5f\x0e\xc5\xea\xbe\x34\x1a\xd9\x4b\xed\x4f\x0b\xbd\x1d\x5d\xf3\xf6\x9d\x1d\x69\x8e\xc1\x76\xe0\xba\x89\xaa\xa2\xca\xd1\x4b\x8e\x08\xc7\x02\xaf\x90\x62\xed\x26\xe6\x6f\xe2\xb1\xec\xfb\x55\x1d\x77\xd5\xf0\xec\xe8\xf8\xbb\x3c\x1d\x1b\xe5\xa1\xcb\x2e\xec\x44\xea\xac\xa9\x1a\xa6\xb7\x01\x00\xa8\xe6\xb1\x97\x8f\xe0\xc6\x5a\x2b\xef\x5c\x9a\xe1\x32\x1a\x0f\xf2\xfc\xf8\x74\x73\x65\x19\xb7\xb3\x08\xc2\xf6\xf1\x83\x9d\x84\x36\xd8\x10\x8f\xb7\x29\x68\x70\xd7\x5c\x4a\xc3\x0c\x1d\x56\xed\x24\xe2\x70\x13\xd1\x4b\x03\x0a\x1f\x84\xee\x78\x4e\xa4\xe9\x9f\x77\x3b\x9d\x80\xbe\xf2\xc4\xff\xf5\xec\xd5\xdf\xce\x5e\xbf\x56\x93\x52\x64\xb3\x37\x07\xba\xd2\x97\xa4\x9f\xba\x0e\x6
1\x11\xe2\xfc\x1c\xc5\x11\x5d\xab\x9c\x04\x69\x2d\xcc\x28\x5b\x4a\x72\xff\x09\x63\x6c\x62\xf9\xcd\xf0\x60\x8c\xb9\x1c\xe2\x9f\xce\xfb\x7c\x16\x99\x61\x81\x29\x0a\x60\xc2\x11\xe8\x8f\x86\x97\x59\xe9\x36\x8d\xc0\x92\x29\xd4\x74\xaf\xb3\xce\x04\x81\x97\x6e\xf8\x59\x36\xc4\x49\x98\x8e\xb1\xf9\xbb\xe3\xf8\x82\xcc\xd5\x9d\x4e\xfb\x57\xc3\x38\x9e\x20\xd5\xba\xd2\x52\x6d\x05\x89\xb1\xd9\x4a\xe5\x3c\x9a\x04\x88\x09\x3c\xc3\x41\x45\x0c\xce\x78\x7c\x35\x28\x3c\xd1\x21\x93\x94\xf3\xc8\x19\x5f\xf7\x1c\x7f\xe5\xeb\x45\xb3\x65\xa2\x31\xf9\x1e\xea\x4c\xa2\xe9\xae\xeb\x74\xc5\x4e\x41\xd6\x06\x55\x93\x1f\xcd\xb1\xd2\x94\x55\x16\x22\x96\xc3\xc7\xdd\x0b\x3a\x51\xdb\x56\xc2\xa2\x0f\xb0\x0a\xb9\x20\x61\xd1\x64\x09\x9b\x5e\x82\xf7\x33\x6b\xd0\x66\x94\x1a\x4e\x7e\xba\xb9\xfa\x00\x63\xa7\xaa\xf8\x02\x0a\xef\xee\x7a\xa9\x99\xe9\x75\xa7\x32\xa2\xe4\x56\x66\xfa\xca\xc5\xa7\xd4\x3b\x70\xbc\x97\xb7\xf6\xe0\x74\x3c\xdc\xbd\xb8\x90\x78\x93\x95\x34\x7e\xe4\x4b\x89\x1f\xd7\xd2\x8f\xba\x96\x7c\x7f\xb3\x6f\x2e\x34\x0d\x95\xeb\xa7\x01\x1d\xa5\x36\x85\x13\x39\xa7\xc8\x0a\x59\x5a\x79\xcb\xeb\x06\x26\x62\x81\x88\xd0\x6d\x4c\x2e\xa8\x94\x59\x30\x45\xe2\x1e\x21\xa2\xce\xbc\x5c\x1d\x51\x73\x31\x55\x88\xf5\x40\xdf\xbf\xc1\x88\x10\x34\xb1\x75\xe3\xf1\x15\x70\x26\x95\x1e\x6f\x4b\xf3\xa8\x6a\xee\x4a\xca\x6c\xa5\x03\x30\x4c\x1f\x8d\x78\xa4\x41\x48\xaa\x80\x62\x78\x94\xbf\x58\xdc\x76\x75\x8c\xc7\x57\xfa\x55\xb9\x05\x62\xd2\x98\x7d\x2e\x72\x4e\x03\xac\xe2\x6d\x94\x73\xc8\xe9\x26\xaf\x30\xdd\x88\xac\x99\xce\x4b\x46\xe2\xd4\xeb\xad\x49\x6b\x3d\x0c\xd9\x5c\xe7\xf4\x90\xb5\xe2\x79\x61\xee\xa0\x02\x4c\xa1\x96\xe7\x87\xc4\x7a\xd9\xfb\x5e\xe7\x6b\xd0\x6a\xbd\xf4\x68\xb6\x35\xbd\x5f\x74\x6e\x2f\xcb\xae\x41\xb5\x77\x66\x92\xf7\x16\xd7\x5f\xdc\x99\x96\x66\x7f\x71\xd6\x62\x1a\xbb\x6a\x2a\x5d\xab\x0b\x64\x86\x02\x15\xf4\x26\x57\x16\x41\xf9\x18\x46\xb9\xb0\x16\x94\x8b\x9b\x24\x42\x1c\x2c\x25\x19\xfc\xf2\x94\xef\x0c\x9c\xc1\xa5\x4a\xbd\x56\xdb\xdc\x07\x18\x9f\x02\x2c\x00\x5f\x28\x24\xd4\x29\x02\xb0\xa8\x32\x4c\x54\x9b\xd7\x97\x7
5\x2b\x53\x96\x46\x1f\x4e\x93\xe0\x0e\x89\xf2\x57\xde\xd8\x84\xfd\x5d\xbc\xd4\x7f\xc5\x70\x66\xa9\xa8\xb0\x50\xfa\xda\x19\x8a\x39\x80\xa1\x2e\x15\xa0\xe0\x5d\xdc\x70\xeb\x70\x25\x37\x98\xd0\xc2\xd2\xf8\xba\xce\x6c\x50\x11\xbe\xd3\xae\xbc\x1b\x74\xcf\xb0\x40\xfc\x14\x20\x11\xf4\x80\x80\x77\x08\xa0\xd9\x0c\x05\x02\xc4\x0c\x53\x85\x79\xc3\x11\x09\x75\xc4\xb8\xaf\x38\x19\xfa\x57\x82\xb8\xc8\x4d\xa9\x07\xde\xd3\x7b\xb4\x42\x4c\xe1\x5a\xe4\x24\x00\x3b\x18\x11\xbe\xde\x2c\x99\xa4\x89\x57\x34\x0a\x45\xa9\xc9\x7b\x84\xe7\x0b\x81\x42\xdf\x70\xe1\x3e\xc9\x06\x94\x48\x6d\x87\xa2\xb5\x1a\x02\x73\xba\xb4\x2f\xe7\x9b\x7a\x3b\x2d\x15\x05\xb5\x0b\xab\x6a\xa5\xee\x4e\xac\xbd\xe5\x5a\x5c\x73\x93\x2d\x9d\xdf\x27\x16\xdd\x20\x2d\xea\x52\xb8\x8a\xb3\xee\x95\x0f\xca\xeb\x2b\x35\x08\x7c\x79\x3c\x01\x9f\x38\x02\xcf\x55\x98\xe4\x73\x03\x56\x34\x45\x42\xa1\x11\x21\x36\x77\xf7\xb7\x79\xb6\x46\x96\x52\x5c\xd5\x6a\xf4\xba\xb2\x25\x29\xc0\x92\x2a\x1f\x3f\x24\x6a\x96\x8e\x20\xaf\x63\x15\xea\xfb\x67\x4e\x3d\x98\x0b\x83\x66\x81\x9b\x7e\x3e\xc6\x36\x51\x8d\x1b\xc8\x04\x0e\x1e\xcf\x18\x43\xb1\x98\x28\x55\x85\x58\xef\xbb\xd5\x68\x2a\x8a\x06\xde\x26\x53\x94\x8a\xa7\xf2\x65\x5a\x39\xda\x5b\x96\xe9\xba\x98\xe5\x31\x82\x62\xf1\x41\x8f\x48\xae\x46\x3d\x38\xf3\xa8\x52\x09\x2f\xb8\xea\x09\x48\xee\xe3\x7c\x09\xe0\xa7\xa8\x63\x0f\xb2\x6c\xbc\x46\x7f\x6e\x92\x93\xe3\x7a\xaa\x58\x4f\xea\x0f\x96\x44\xa8\x70\x8d\x5d\xbf\xa2\xf2\x29\x05\x75\x06\x89\x0e\xf0\x4b\x22\x94\x07\x9d\x32\xeb\xe7\xe9\xaf\x12\x65\x89\xb0\xed\x4c\x10\x6b\x76\xf8\xe7\xdd\x2d\x4d\x10\xc7\xec\x28\xd8\x87\xa5\x26\x08\x2f\xb3\x3d\xbc\xa9\x68\x43\x23\x7f\xf0\xaa\x35\x3a\x36\x18\x1a\x3e\x35\x1b\x19\x1d\x65\x86\x46\x69\x37\x39\xa3\x23\xc9\x2c\x8a\x53\x47\x5a\x7c\x4b\xad\xd6\xbe\xe8\x94\x4d\xb1\x41\x39\x16\x16\xfd\x51\x3d\xd6\xaa\x47\xc5\xfd\x89\x5e\x83\x3d\xb3\x7a\x26\x4b\xcc\x18\x65\xc6\xdf\xd2\x2b\x4f\xfc\x05\xb5\xe9\x6e\x26\x87\xb3\x54\xa7\x3a\xf1\xd0\x72\xe9\xea\x17\x2a\x27\x5a\x21\x1c\x73\x13\x5f\x36\x3b\x3b\x36\xf0\xe3\x30\x1e\xc9\x26\x3c\xb0\x4
b\x30\x9f\x6f\xcd\x77\xc2\x07\x6b\x2d\x56\xf1\xa3\x07\xde\xa2\x99\x5c\x80\x33\xca\xee\x21\x0b\xf3\x67\x44\x5f\xc7\xe6\x78\x73\x9a\x7a\x32\xad\x23\x13\xc0\x38\x8e\xac\x72\x63\x28\x42\x2b\x98\x4b\x65\x59\x20\x18\x22\xd6\x37\x47\xcf\x2c\xd9\x14\x4a\x4b\x93\x09\x6d\xbf\x66\x39\xac\xe6\x4d\x7f\x57\x44\xfa\x61\x44\x45\x0b\x88\x94\x15\x7e\x5c\xb0\xdd\xe3\xc2\x71\xc9\x1e\x97\x6c\xf8\x46\x55\x4b\xda\xc1\x71\xc3\xbf\x94\xc8\xcc\x73\xef\x34\x8e\xb9\x7e\xdd\x93\x3f\x5a\x1c\xc8\x6f\xe5\xf2\xf2\x07\xb7\x22\xf7\x79\x67\x23\x29\x0c\x9d\x33\x51\xbe\x38\x41\xee\xeb\xd6\x38\x54\xce\x1b\x0e\x70\x85\xb2\x9f\x4b\x26\x9d\x7b\xf0\xd3\xf4\xec\xf5\xab\xdf\x5f\xff\xfe\xeb\xef\xbf\xff\xfa\xf3\x1b\x30\x54\x81\x5b\x80\x2f\x20\x43\x72\xbb\x11\xf2\x68\x6d\xa3\xfe\x66\x94\x01\x42\xef\x41\x42\x04\x8e\xc0\x8c\x46\x91\xfc\x23\x4e\xfb\x53\x2b\x28\xf5\x76\x20\xb8\xd4\x50\xed\xcb\x38\x42\x4e\x96\x7d\x69\x6c\xd8\x4b\xfd\xca\xf4\x8d\x95\x32\xb3\x9b\x30\x40\xb7\xce\x45\x69\x14\x4b\x69\x41\x8b\x0e\xc5\xff\x31\x77\x7c\x07\x89\x51\xd9\x13\x6b\xa4\x3d\x54\xca\x10\xe7\x8b\x03\xe3\x2d\xab\x31\x76\xf7\x26\xf5\x3b\xd0\x7e\xb2\x34\x63\x39\xc5\xd4\x82\x2b\xb1\xcd\x1f\x35\xea\x0e\x41\x5f\xc4\x64\x41\xe3\x49\x09\x58\xcc\xc6\x02\x0b\x16\x50\xc4\xb5\xa8\x16\x90\x84\x91\xb1\xaa\xe4\x89\x26\x96\xdb\xae\xe0\x25\x14\xfb\x22\xde\xd3\xd8\xf6\xd1\x86\x72\x25\x75\x09\x0f\x4f\xba\x68\xda\x90\x6a\x7d\xe7\x40\xa1\xdd\xdb\xd2\xb0\x97\x06\x92\x3c\x2e\xbc\xb5\x40\x2e\xaa\x4e\x11\xfa\x9f\x16\xc3\xc5\xa5\xb1\x7f\x6a\xa8\xa0\x37\x00\xd7\x54\x20\x47\x86\x03\x65\x95\x69\xf0\x29\x15\x58\x65\x71\xa7\xbc\xee\x42\xc4\x05\x26\x6a\x67\xbf\x51\xd5\x15\x30\x07\x10\xc4\xc9\x34\xc2\x01\xf8\x89\x50\xf2\xd2\x56\x35\xf8\x19\x0c\x47\x60\x30\x3c\xbf\xd1\x65\x18\x2a\x19\x7c\xf5\xb6\x0d\x6f\xdf\xa5\xb4\xb9\x49\xa2\x4e\x71\x78\x15\x93\x89\x48\x08\x41\x51\x0b\x55\x62\x66\xf5\x79\x74\x7d\xab\x9a\x36\x59\x2a\x15\x84\x4c\xfb\x68\x43\xce\xb4\xd1\x61\x28\x59\x11\xf7\xae\xb6\xbb\xbc\xd2\x99\x48\xdb\x72\xff\xb6\xb1\x5b\xdc\xa6\xba\xec\x4d\x97\x4c\x1b\x35\xae\xa7\xb
2\xbf\x96\x84\x0f\x96\x83\xeb\x99\x92\x4e\x25\xa8\xbe\x8f\x72\xc3\xdd\xb7\x44\x67\x15\x0e\xab\x65\x7b\x98\x7b\xe4\x41\xe0\xd7\x53\x39\xfe\xfa\x55\x8f\xb4\x12\x88\x5d\x1d\xaa\x54\xfe\x98\x2d\xfd\xa5\xc2\xab\x29\x79\x21\x00\x0c\x44\xa2\x6e\x3c\x3b\x0c\xc8\x5e\xa8\x73\xd6\xcd\xfa\x5b\x75\x60\xeb\xa5\xfb\x9c\x2e\xa9\x18\x93\x47\xb7\x03\xe5\xca\x5f\x6f\x2a\x06\x98\xce\xb3\xbc\xc0\x5e\x3b\x0b\x75\x43\x29\xc1\xdd\x52\x60\xef\x8a\x85\xc0\xd2\x6a\x89\x5a\x4a\xaf\xfb\xb7\x5d\xde\x31\x7f\x34\x8d\xd3\xaa\xb2\xa2\x53\x5d\x10\x73\x70\xdd\xbf\x4d\x0b\x8e\x3d\xa6\x72\x82\x75\xea\x2c\xcb\xc2\xee\xb5\x44\x40\xf2\xf3\xb7\xaf\xfb\xb7\x4f\xa4\x0e\x8c\x6f\xbd\xa9\x02\xc8\x4d\x73\x78\xa4\x90\x0c\x47\xdc\x04\xc3\xac\x60\x84\x75\xe8\x38\x14\xc3\xb8\x6f\xea\xa9\xe9\x54\xf8\x0c\xb6\x18\x7c\xe8\x5f\x7f\xea\x5f\x4d\x3e\x5e\x5f\xfd\xe3\x91\xd4\x50\xf3\x23\xf1\x19\xc4\x64\xd2\x8e\x4e\x59\xae\xd3\x70\xe4\x26\x36\xa9\x6b\x16\xd5\xa1\xb9\x85\xe4\x48\xd2\x33\x0d\x04\x52\x14\xf5\x7a\xe2\x02\x0a\x1c\xb8\x45\xe9\x0a\xf5\x2b\x21\xe7\x78\x4e\x6c\x91\x42\x94\x97\xd4\x0e\xd3\x79\xef\x7b\x57\x8c\x36\x9d\xf5\x46\xa8\xdb\xe7\xbd\xe3\xee\x95\xfd\xd4\xec\x5e\x6f\x2f\x47\xaa\x12\xd0\x93\xdc\xc2\xf2\x47\xbb\x26\xb4\xca\xea\x88\xaa\x9c\x53\x4b\x1e\xcc\xf3\xa9\x77\x15\xe4\x49\xcf\x8a\xed\xe9\x94\x3f\x66\xee\x9a\x60\x27\x60\xc4\x10\x5a\xc6\x02\xaf\x90\xae\xb2\xbc\x42\x4c\x85\xbc\x2a\x00\xf8\xf4\x9e\x3f\x73\x34\x84\x88\xc7\x58\x68\x52\x44\x30\xb8\xd3\xc5\xbe\xee\x08\xbd\x27\x4a\xdb\x9c\x3a\x7d\x43\xb7\x70\x28\xe2\xd6\x57\x87\xa4\xa4\xeb\x25\x05\x05\x58\x62\x82\x97\xc9\x52\x5d\xc1\x4f\x23\xa4\x2b\x94\xd8\xb7\x41\xf0\x47\x32\x45\x8c\x20\x37\x37\xf6\x04\x8c\xb3\x9b\x7f\x8b\x46\x96\xd1\xfe\xc4\x79\x30\xf3\xdb\xa6\x7d\xaa\xad\xc3\xd7\xd8\x2a\x41\x16\x13\x7f\xc7\x38\x01\x61\x82\x9c\x67\x33\x3d\xe7\xe0\xb0\xb8\x81\x2c\x27\x6a\x43\x31\xf8\x33\xaf\x7e\xfb\x5b\xef\xf5\x5f\xff\xd2\xfb\xa5\xf7\xcb\xd9\xab\xdf\x54\xd6\x5d\xf4\x52\xaa\x6b\xed\xe1\x35\x37\xff\x4a\x86\x48\xc9\xab\xf3\xe3\x23\xa1\x6b\x42\xe9\x5a\xbd\x33\xca\xc0\xe5\xc
0\x01\x75\xf6\xca\x3d\xdb\x12\xcf\xae\xd2\xaf\x8c\x08\xb0\x5b\xa2\xf7\x5d\xb1\xc6\x3f\x17\xcc\x8f\xdc\xd9\x78\xa3\x91\x95\x7c\x35\x09\x1e\xd9\x3a\xc2\x84\xe3\x30\x07\x73\x39\x32\x80\x35\x7e\xf0\x86\x32\x8e\x86\xa3\xd5\x5f\x3c\x00\xaf\xde\xde\x37\x3d\x8e\x82\x84\x61\xb1\xde\x70\x8f\x3b\x36\x8f\x78\xf7\xb8\x8d\x6f\xfa\xb8\xdb\xfa\x20\xb7\xac\xfb\xd9\xa7\xf7\xc8\x12\x1d\x38\xb0\x8a\x83\xc9\x82\x72\x31\xf1\x7b\xf3\x59\xa3\x1e\xfd\x3c\x1a\xbc\xa7\x5c\xf8\x08\xc5\xf5\xb5\x92\x52\xa5\xa7\xae\x6b\xcc\xdb\x41\x7f\x34\xb4\x21\x6a\x87\x2d\x69\xf8\x20\x04\x36\x91\x16\x8d\x68\x6c\x94\xf1\x46\x32\xbb\x0c\x53\x46\x58\xee\x05\x8f\x88\xfe\x0d\x2d\xb2\x72\x0a\x82\x8d\x98\xdd\x07\x84\xd8\xde\x52\xaa\x0a\x0b\xb1\x4a\xb4\x7c\xcc\xeb\x4d\x78\xd8\x1d\xc3\xc3\xb7\xea\xda\x0c\xb2\x73\x77\x47\xda\x79\xa2\x1e\x38\xc7\xfc\xae\x0a\x9a\x42\x07\x25\x39\xe5\x08\x40\xfb\xcb\x25\xd9\xba\x14\x3b\xdd\x12\xa7\xf9\x51\xf9\xdc\x1f\xc8\x6e\xf1\xcf\x2b\x11\xb2\x0b\x18\xf2\x55\xf8\xde\x65\xa0\xee\x75\xa7\x84\x5a\x60\xef\x4d\xa0\xde\xdb\x42\xca\x3f\x20\x32\x7a\x2d\xc9\xb4\xbe\x83\x41\x40\x13\xd2\x0a\x9d\xdf\x96\x45\xd2\x2d\x33\x5c\xc8\x1c\x5c\xba\x0d\x10\xb7\x5f\xcd\xf1\x0a\x11\x49\x01\x49\xef\x42\xaa\x1f\x9c\x72\x44\xc4\xa9\x8b\x67\x02\x0c\x88\xa0\x3d\x3d\xf4\xe7\x88\x88\xc2\xeb\x31\x57\x23\xa8\x80\x5c\x37\x6d\xfb\xfa\xe1\x86\x15\x53\x3e\xf8\xad\x5a\x33\x0a\xc3\x65\x3b\xe7\x10\x5a\x42\x1c\x95\x72\x31\xd3\x03\x47\x46\x3e\x32\x46\xee\x04\x6b\xbf\x5d\xd9\x94\x74\xd1\x6f\x53\x33\xc5\x6b\xbc\xe3\x82\x29\x20\x8d\x93\x1f\x50\xa2\x93\x78\xb9\x6b\xa6\x55\x99\x21\x8f\x0c\x9d\xd3\x1a\x1f\x1d\x43\xe4\xd4\xc3\xb2\x2f\xef\x02\x1e\x67\x01\x7a\x6b\x23\xa2\xd4\xe1\xd9\xd9\xa5\xd8\xea\x9d\xf1\x20\x7f\x17\xb7\xf9\xa6\x6e\x4f\x4c\x68\xa7\xdf\xd2\x41\x6d\xa7\xe1\xfc\xe6\x2d\x75\x5c\x09\x94\x76\x63\x4d\xe7\x30\x74\x3b\x8c\xdf\xf4\xdd\xdd\x3c\xd0\x94\x5e\x05\x37\x8f\x35\x57\xbe\x42\x39\x43\xb7\x96\x95\xf6\x25\xda\x69\x67\xc8\x88\xb8\x24\x72\x17\x73\xc1\xf0\x34\x11\x28\x04\x4b\x1a\x22\x15\xf0\xab\x9c\x0d\x0e\xe1\x7a\x65\xeb\xa4\xc
8\xd1\x1f\x32\x90\xce\xe0\xb9\xce\x59\x1c\xd4\xc1\xb9\x5e\xde\x8c\x06\xfb\x47\x73\x6d\xac\x9c\x75\x07\x97\x2c\x0e\x1e\x16\xa2\xb5\xb9\x8b\xa9\x12\xa3\xb5\x6e\x7d\x7c\x52\x6d\xea\x21\x57\x5b\xc0\xac\xa6\xb5\xe4\x04\x0d\x68\x64\x11\xdc\x49\x49\x2b\x17\xa0\x43\xf6\x2b\xd9\x5e\xba\x82\x8a\x00\xb3\xdd\xc5\x72\xdd\xfb\x02\xea\x2c\xb2\xb8\x6f\x02\xb6\x5b\x5c\x47\x2c\xf1\x1f\x19\xff\xf8\x41\xd6\x4c\x47\x41\xc4\xbf\x77\xd5\x1c\x61\xc3\x7f\xd8\x65\x73\x84\x0d\x3f\xc2\x86\xa7\x6c\x3c\xc2\x86\x97\xf2\xfc\x90\xb0\xe1\x29\xbb\x1d\x11\xae\x56\x87\xce\x43\x39\xb5\xa8\xa1\x13\x54\xd5\xc6\x5f\x7e\xfd\xed\xb7\xdf\x5f\xfd\xfa\xfa\xe7\x37\xe0\x1d\xfe\xe2\xd7\xa9\x33\x24\x02\x70\x26\x10\x03\x03\xf7\x9d\xea\xb2\xd7\xfb\x24\x2e\x0d\x9e\xa9\x13\x33\x75\x5c\x96\x2f\xdd\x51\x55\xfa\x32\xb1\xf5\x47\x99\x0a\x4a\xc2\xb0\xb9\x40\x53\xa5\xe6\x35\xf6\xa0\x2f\x21\xae\xa2\x80\x71\xa9\x34\xeb\x7b\x05\xcd\x35\xa0\xec\x01\xb7\x0e\x99\xdf\x9d\x93\xd0\x2b\x0f\x28\x33\xca\x2c\x56\x8b\xd3\x1e\xfb\x48\xcd\x55\xf2\x58\xc3\x8c\xf6\xf2\x99\x35\x6f\x56\x2d\x1b\xe8\xe2\x60\x8e\x79\x71\x76\x56\xd2\x89\x5f\x0a\xec\xac\xd4\x04\xb1\x06\x8c\xfc\xd4\xfe\xfe\xed\xdb\x99\x2f\xe4\xf2\x4b\xf5\xc6\xcc\x20\xd9\xbb\x95\x97\xcb\xaf\x2f\x31\xf1\x72\xa9\xed\x87\xf2\x2b\xe4\xe0\x5e\x84\x37\xb6\x6e\x5a\x72\xb8\x14\x48\xc0\x89\xa1\x6d\x05\x6d\xe0\xe4\x6e\xa7\x28\x07\x2a\x5d\x5b\x85\x99\x1a\xbc\x5a\x7d\x17\x88\xb9\x5d\x6e\xf6\x1d\xcd\xf5\xff\xe1\x91\x0c\xaa\x3d\x97\x3b\x01\xd0\x28\x12\x47\x1a\x5a\x2a\x9c\xd5\xa0\x20\xf8\x3b\xb2\xa6\x6c\xb6\x65\x67\xd8\xb1\x52\x39\x5a\xc2\xdb\x20\x6f\xd3\x45\x96\xfa\x50\x96\x57\x63\x10\x10\x32\xc0\xb7\xb4\xce\x65\x1a\xda\xe9\x38\x5c\x1b\xb3\xee\x69\x3b\x41\x63\x4a\xa3\x6a\x2d\x35\xca\xbe\xed\x50\xe2\x84\x48\xc7\xd6\x4d\x0d\x35\x85\xc1\x5d\x12\x67\x14\xac\x58\x6e\xfa\x31\x97\x03\xb5\xe2\x50\x2f\xad\x05\xbe\xed\x5a\x60\x5f\x16\x55\x2d\x7f\x44\x6a\x30\x0b\x0e\x74\x6a\xed\x37\x8b\x43\x5f\x08\x11\x3b\x15\xf8\xb7\x81\xa6\x93\xe7\x8e\x62\x11\xff\x86\xa4\xd8\x82\x1c\xb5\x24\xd9\xbb\x7e\x51\xe7\xc7\xcd\xde\x2
e\x79\x36\xea\xda\x15\xcb\x98\x47\x7b\xf0\x66\xd5\xe8\x8c\x2a\xd8\x43\x4f\x63\xb4\x47\x43\xad\x3e\x91\xe7\x2e\x41\xd2\x83\x74\xe3\xf5\xdc\x39\x08\xd1\x47\xec\x7b\xf2\x7a\x1a\x24\x8c\x21\x22\xa2\xf5\x29\x40\x5f\x60\x20\x0c\x22\xfd\xd1\x0b\x75\x70\x2f\xd4\xad\xab\x1f\x2a\x9c\x4f\x85\x2b\xfb\x52\x7d\xf7\x63\xb8\x9e\xf6\xbe\xc3\x88\xa0\xf6\x12\xf2\x76\xf0\x00\x77\x90\x2d\x77\x98\xdb\xa0\xd3\xd7\x8c\x9b\xf6\xa3\xbd\x6d\x36\x0d\x36\xba\xee\xef\x47\xfb\x13\xf8\x4e\xd5\x66\xdc\xee\xb6\xf0\x58\x8d\xf1\x58\x8d\x11\x1c\xab\x31\x1e\xab\x31\x7e\x67\x35\xc6\x1f\x09\x3f\xbe\x6c\x7a\x6f\xd5\x0a\xfd\x8e\xd9\xe9\x0e\xba\x30\xb9\x12\xed\xfb\x5d\x25\x5a\xbe\xcb\x7c\xd9\x45\x0d\x97\x02\x0f\x5a\xd3\x6c\x23\x17\x9a\xc9\xaa\x31\x8e\x67\xdb\x52\x7d\x9b\x92\x2c\xdf\x45\xf9\x46\x95\x5a\xbc\x16\x85\xaa\x2d\x6d\x2b\xb5\xf8\xdb\x45\x49\xd5\x96\xa6\x95\x5a\xbc\x8e\xea\xaa\xb6\x3c\x36\xf1\x38\x96\x59\x7d\x04\xc6\xd3\x71\x3f\x7c\x8a\xfb\x61\xe9\xd2\x3b\x6e\x8e\x07\xd7\x7e\xc7\x9d\xf2\xb8\x53\xd6\xc9\xca\xb1\x80\x72\xc9\x2b\x8f\x05\x94\x8f\x05\x94\x77\x59\x40\xf9\x68\xf6\x3c\x79\xb3\x67\x47\x95\x96\x8f\x86\x0f\xd8\xc9\x66\x76\xac\xdb\x7a\xac\xdb\xba\x51\x54\x76\x56\x6a\xf9\xb8\x62\xc1\x77\xac\xd8\x63\xad\x65\xfd\x73\x5c\xb3\xc7\x5a\xcb\xf9\x57\x1e\xbd\x70\x47\x73\xf4\x50\x31\x1c\xab\x98\x4c\xe6\x50\xa0\x7b\xb8\x21\x5e\xe9\xf3\xe8\xfa\xd2\x7b\xa6\x73\xc1\xf7\x9f\x63\x62\x46\xd8\xb9\x58\x8e\xfd\x27\xba\x7c\x1e\x5d\x03\xc3\x43\xe5\x67\x08\x02\x14\x8b\x5c\xbe\xd0\x0f\x9f\x5f\x52\xa8\x2e\xb6\xb1\x58\xd8\x56\xa2\x98\x96\x29\xeb\x34\x3e\xf9\x0e\x80\x80\x1b\x06\x2f\x19\x5b\xb1\xa8\x62\x9a\x48\x78\x85\x02\xd2\x81\xaf\x4e\x01\x0e\x29\xfc\x9a\xaf\x79\x67\x63\x16\x2d\x5b\x30\x9c\x44\xae\xd7\x36\xa6\x53\x85\x4a\xd4\x3f\x87\x31\x9e\xbe\x8b\xc2\x4d\x69\x5b\x41\x4f\xa0\xb3\x84\xad\x43\x4c\x85\xa1\xe3\x19\x80\xe0\x3d\x9e\x2f\x80\x11\x5e\x1c\x61\xb1\xf6\xf4\x94\x83\x5b\xee\x7b\x12\x15\x52\x72\x91\x63\xab\x54\xc1\xb7\xac\x01\xd8\x1d\x2e\xc5\x08\xb1\x89\xcd\x0e\xdf\x82\x5f\xaa\x8e\x8a\x2a\x05\x91\x16\x65\x72\x29\xea\x5
7\x31\xcd\x58\xe7\xf5\x83\xb9\xcd\x03\x28\xa1\xb1\x7c\xc1\x85\xe9\x7a\x0b\x62\xdb\xa6\xdd\x23\xfa\x3c\x88\xbf\x8b\xde\xef\xfb\xe0\x72\x30\x6a\x44\xed\x82\x3c\xa7\xd4\x06\xc3\x99\xdc\xa7\x56\x38\xb4\x59\xa5\x4e\x2b\x15\x71\x0f\x13\x41\x97\x50\xe0\x40\xdd\x10\x24\x1c\x15\xc2\xfe\x38\x5c\x22\x7f\xbb\x4f\x53\x49\x87\xe7\x16\x4a\x4c\x8d\x3b\x37\xde\x52\x5e\x5f\x0e\x46\x8f\x7d\x4d\xb5\x2e\xe5\x64\xaa\xfb\xea\x82\x68\x29\xe6\x72\xb8\x26\x70\x89\x03\x7b\x03\xd3\xfd\xca\x4d\xfb\x34\xa2\xfe\x3b\x00\x00\xff\xff\x48\xff\xcd\x9c\x9c\x80\x01\x00"), }, "/container.yaml": &vfsgen۰CompressedFileInfo{ name: "container.yaml", modTime: time.Time{}, - uncompressedSize: 4900, + uncompressedSize: 5337, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x57\x4b\x6f\xe3\x36\x10\xbe\xeb\x57\x0c\xe2\x43\x5b\x20\x91\x37\x39\xba\x27\xaf\xb3\x0f\x63\x13\x67\x11\x39\x5d\xec\x49\xa0\xa9\xb1\x3c\x35\x45\xb2\xe4\xc8\x5e\x23\xcd\x7f\x2f\x28\x4b\xb6\xe5\x38\x5d\xbb\x0f\xb4\x68\x57\x87\x20\x22\xe7\xf1\x7d\x33\x23\xf2\x73\x07\x06\xc6\xae\x1c\xe5\x33\x86\xab\x57\x57\x57\xf0\xce\x98\x5c\x21\xdc\xdc\x0c\xa2\x4e\xd4\x81\x1b\x92\xa8\x3d\x66\x50\xea\x0c\x1d\xf0\x0c\xa1\x6f\x85\x9c\x61\xb3\x73\x0e\x3f\xa1\xf3\x64\x34\x5c\xc5\xaf\xe0\xfb\x60\x70\x56\x6f\x9d\xfd\xf0\x63\xd4\x81\x95\x29\xa1\x10\x2b\xd0\x86\xa1\xf4\x08\x3c\x23\x0f\x53\x52\x08\xf8\x45\xa2\x65\x20\x0d\xd2\x14\x56\x91\xd0\x12\x61\x49\x3c\xab\xd2\xd4\x41\xe2\xa8\x03\x9f\xeb\x10\x66\xc2\x82\x34\x08\x90\xc6\xae\xc0\x4c\x77\xed\x40\x70\x05\xb8\x7a\x66\xcc\xb6\xd7\xed\x2e\x97\xcb\x58\x54\x68\x63\xe3\xf2\xae\x5a\x5b\xfa\xee\xcd\x70\xf0\x66\x94\xbc\xb9\xb8\x8a\x5f\x55\x3e\x0f\x5a\xa1\xf7\xe0\xf0\x97\x92\x1c\x66\x30\x59\x81\xb0\x56\x91\x14\x13\x85\xa0\xc4\x12\x8c\x03\x91\x3b\xc4\x0c\xd8\x04\xc0\x4b\x47\x4c\x3a\x3f\x07\x6f\xa6\xbc\x14\x0e\xa3\x0e\x64\xe4\xd9\xd1\xa4\xe4\x56\xb5\x1a\x78\xe4\x5b\x06\x46\x83\xd0\x70\xd6\x4f\x60\x98\x9c\xc1\xeb\x7e\x32\x4c\xce\xa3\x0e\x7c\x1a\x8e\xdf\xdf\x3d\x8c\xe1\x53\xff\xfe\xbe\x3f\x1a\x0f\xdf\x24\x70\x
77\x0f\x83\xbb\xd1\xf5\x70\x3c\xbc\x1b\x25\x70\xf7\x16\xfa\xa3\xcf\xf0\x61\x38\xba\x3e\x07\x24\x9e\xa1\x03\xfc\x62\x5d\xc0\x6f\x1c\x50\xa8\x23\x66\xa1\x68\x09\x62\x0b\xc0\xd4\xac\x01\x79\x8b\x92\xa6\x24\x41\x09\x9d\x97\x22\x47\xc8\xcd\x02\x9d\x26\x9d\x83\x45\x57\x90\x0f\xdd\xf4\x20\x74\x16\x75\x40\x51\x41\x2c\xb8\x5a\x79\x46\x2a\x8e\x22\x61\xa9\xee\x7f\x0f\xa4\x71\x18\x4b\xed\x8a\x58\x2a\x53\x66\x71\x5e\x8d\x52\x2c\x4d\xd1\x5d\x5c\x0a\x65\x67\xe2\x32\x9a\x93\xce\x7a\x90\xa0\x5b\x90\xc4\x5b\x61\x2d\xe9\x3c\x2a\x90\x45\x26\x58\xf4\x22\x00\x2d\x0a\x0c\xa1\x74\x68\x35\xba\xc3\xf1\x6a\x3b\x6f\x85\x0c\xc6\xda\x15\x17\x7e\xe5\x19\x8b\x28\xb0\xdb\x86\x19\x34\x61\x22\x80\x45\x03\x73\x71\x39\x41\x16\x97\x11\x80\x5f\xc3\x78\x6f\x3c\x8f\x2a\xfb\xb3\x6d\xde\x75\x32\x61\xc9\x87\x84\x67\x11\x80\x43\x6f\x4a\x27\xd1\x87\xf8\x00\x17\x75\x8e\xb5\x61\xba\xf1\x4c\xa5\x2a\x3d\x57\x29\xc3\xb3\x26\xbc\x01\x32\x68\x6d\x52\x36\xc6\xc2\x2a\xc1\x21\xf7\xe3\xa3\x75\xe6\x67\x94\xfc\xf4\xd4\x7d\x7c\x54\x46\x56\x65\xaf\x5e\x42\xa6\xa7\xa7\xb3\x67\x5e\x03\xa1\x5f\xe3\x83\xc7\x6c\x6c\x6e\x05\xcb\xd9\x7d\x0d\x71\xcd\x86\x5d\x89\xb5\x4b\x83\xbd\xbf\x10\xa4\xc2\x54\x0f\x75\xdf\x7b\xe4\xa1\x5e\xa0\x66\xe3\x56\x2d\x6b\xca\xb5\x71\x98\xbd\x25\x54\x59\xcd\xb6\x66\x6c\x32\x4c\xad\x31\xaa\x5e\x6b\x1a\x57\x37\x72\x6b\xba\x2e\x4d\xf8\xbb\x59\x52\x62\x82\xca\xf7\x36\x50\xd2\xf5\xc2\x1e\xc0\xe1\xf5\x36\x08\x0b\x97\x23\x57\x28\x5a\xb1\x32\x72\x28\x99\x16\xd8\xc2\xe6\xb0\x30\x0b\x4c\x33\x9c\x8a\x52\x71\xfa\x0c\x6a\xc9\x81\xf7\xeb\x92\x1f\xb4\x43\x91\x85\x97\xe7\x04\x0b\xd2\x69\x21\x42\x87\xd2\x7a\x5e\xea\xcd\x4d\x7b\x5b\xe6\xbc\xb2\xd8\x83\xba\x6d\x9b\x75\x00\x9e\xd6\xa8\xdb\x5b\x0d\xc9\x7b\x9c\xa2\x43\x2d\xdb\x04\x36\x4e\xf5\x00\xa5\xa2\x64\xe3\xa5\x50\xa4\xf3\x38\xfc\x9f\x5a\x67\x16\x14\x40\x91\xce\x1b\xa2\x3e\xae\xa7\x38\x15\x52\x9a\x52\xef\xc2\x98\xe3\xaa\xd7\x0c\x79\x7f\xbd\x7b\x8f\xd3\x1d\x83\x7c\x31\xef\xed\xbc\x36\xd3\x3a\xec\xdf\x26\x2d\xaf\x96\xcd\x81\x0f\x69\x27\xa2\x33\xa5\xed\x01\x89\xe2\xc5\x2f\xf7\x
60\x7b\xb1\x10\xa4\x0e\x15\x43\x23\x2f\x8d\x9b\xef\xb3\xaa\x97\x8f\xa2\x33\x30\x85\x2d\x19\x47\xcf\x22\x1d\xc7\x45\xae\xdd\x4f\xe3\xe3\x51\x4d\x53\x45\x7a\x7e\x88\x93\x2f\x27\x2f\xd0\xda\xee\x9c\xc2\x2c\x39\x14\xef\x9f\x22\x57\x7d\x78\xd2\xe8\x29\xe5\xff\xf5\xd1\xdc\x61\x3a\x31\x86\xd3\x8c\xfc\x3c\x9d\x17\x3e\x9d\xe3\x6a\x9f\x6b\x30\xb8\x26\x3f\xff\x70\x9b\x0c\xdc\xca\xb2\xf9\x80\xab\xa3\x18\xef\x3a\x9c\x4c\x76\x5e\xf8\xbf\xb4\xb5\x1c\x94\x43\x75\x2f\x35\xc4\x6d\x39\xf1\xe5\x24\x66\x63\x49\xee\x04\xcd\xd0\x4b\x47\x96\x2b\x80\xbf\x5e\xb4\xb0\x8d\x67\x08\x1f\xcb\x49\x52\x4e\xc6\xc1\x2b\x48\x2a\x8f\x3a\xab\x94\xc5\x6e\x0a\x60\x13\xef\x97\xb1\xca\x73\x54\xdd\x76\x32\x9c\x5c\xb6\x9a\xd4\xd7\x2a\xb7\x10\xaa\xc4\x9d\x1b\xbc\x3e\xed\x7d\x77\xf7\x2a\xaf\x10\x87\xa5\xca\x7a\x7b\x8b\xbf\x38\x4a\xd5\xff\x15\x90\x23\xeb\x39\x2c\xea\xcb\x2d\x86\x04\x39\xe8\xd2\x46\x64\xa3\xca\x60\x49\x4a\x81\xf0\x9e\x72\x0d\xa4\x3d\x07\x95\xed\x5b\xfe\x95\x90\x26\x0f\xe1\xb6\x0c\xcd\x70\xa5\x0e\xfa\x74\x47\x2c\x62\x56\x21\x5c\x97\x27\x86\x71\xb0\x26\x1f\xe4\xfc\xb4\x54\xad\x58\x41\x65\xba\x52\x57\x6a\x32\x1c\x48\xca\x88\xcc\x87\x68\xde\x28\x04\x46\x2d\x34\x57\xb1\xfc\xb3\xd6\x86\xd5\x77\x21\xc1\x49\x87\x7a\xe3\xf4\x37\x9c\x7c\x2f\x2a\xbc\x7d\x6d\xb1\xa7\xf1\x02\xa6\x8f\xdb\xdd\x63\x45\x5e\x7d\xed\x1f\x10\x7c\x1d\xc8\x0c\x7a\xfd\x1d\x07\x7d\x6f\x1c\x07\xa5\x1d\x24\x90\x5a\x9d\xa8\x08\xa7\x42\xf9\x3f\x26\x09\x8f\x57\x7a\xa7\x28\xb9\xdf\x17\x44\xd5\x54\xd4\x55\x69\xcf\xc4\xbe\x54\x3a\x66\x5c\x0e\x0a\xf0\x53\xc6\xe5\x6b\xbf\x48\x9a\xc7\x0a\x87\x9a\x5b\xd5\xfb\x76\x41\xfe\x5f\x2e\xc8\x6f\xc7\xf8\xbf\xee\x18\xff\xd3\x3f\xde\x7e\x0b\x00\x00\xff\xff\x67\x51\xd7\xb6\x24\x13\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x57\x41\x73\xdb\xb6\x12\xbe\xf3\x57\xec\x58\x87\xf7\xde\x8c\x4d\xc7\x3e\xea\x9d\x14\xd9\x49\x34\xb6\xe5\x8c\x29\x37\x93\x13\x07\x02\x57\x14\x2a\x10\x40\x81\xa5\x14\x8e\xeb\xff\xde\x01\x48\x5a\xa2\x24\x37\x52\xda\x4e\x33\x6d\x74\xf0\x98\xc0\x62\xf7\xfb\x76\x17\xc0\x87\x1e\x0c\xb5\xa9\xac\xc8\xe7\x04\x97\x6f\x2e\x2f\xe1\xbd\xd6\xb9\x44\xb8\xbd\x1d\x46\xbd\xa8\x07\xb7\x82\xa3\x72\x98\x41\xa9\x32\xb4\x40\x73\x84\x81\x61\x7c\x8e\xed\xcc\x29\xfc\x84\xd6\x09\xad\xe0\x32\x7e\x03\xff\xf5\x06\x27\xcd\xd4\xc9\xff\xfe\x1f\xf5\xa0\xd2\x25\x14\xac\x02\xa5\x09\x4a\x87\x40\x73\xe1\x60\x26\x24\x02\x7e\xe1\x68\x08\x84\x02\xae\x0b\x23\x05\x53\x1c\x61\x25\x68\x1e\xc2\x34\x4e\xe2\xa8\x07\x9f\x1b\x17\x7a\x4a\x4c\x28\x60\xc0\xb5\xa9\x40\xcf\x36\xed\x80\x51\x00\x1c\x7e\x73\x22\xd3\x3f\x3f\x5f\xad\x56\x31\x0b\x68\x63\x6d\xf3\x73\x59\x5b\xba\xf3\xdb\xd1\xf0\x7a\x9c\x5c\x9f\x5d\xc6\x6f\xc2\x9a\x47\x25\xd1\x39\xb0\xf8\x4b\x29\x2c\x66\x30\xad\x80\x19\x23\x05\x67\x53\x89\x20\xd9\x0a\xb4\x05\x96\x5b\xc4\x0c\x48\x7b\xc0\x2b\x2b\x48\xa8\xfc\x14\x9c\x9e\xd1\x8a\x59\x8c\x7a\x90\x09\x47\x56\x4c\x4b\xea\x64\xab\x85\x27\x5c\xc7\x40\x2b\x60\x0a\x4e\x06\x09\x8c\x92\x13\x78\x3b\x48\x46\xc9\x69\xd4\x83\x4f\xa3\xc9\x87\xfb\xc7\x09\x7c\x1a\x3c\x3c\x0c\xc6\x93\xd1\x75\x02\xf7\x0f\x30\xbc\x1f\x5f\x8d\x26\xa3\xfb\x71\x02\xf7\xef\x60\x30\xfe\x0c\x37\xa3\xf1\xd5\x29\xa0\xa0\x39\x5a\xc0\x2f\xc6\x7a\xfc\xda\x82\xf0\x79\xc4\xcc\x27\x2d\x41\xec\x00\x98\xe9\x1a\x90\x33\xc8\xc5\x4c\x70\x90\x4c\xe5\x25\xcb\x11\x72\xbd\x44\xab\x84\xca\xc1\xa0\x2d\x84\xf3\xd5\x74\xc0\x54\x16\xf5\x40\x8a\x42\x10\xa3\x30\xb2\x43\x2a\x8e\x22\x66\x44\x53\xff\x3e\x70\x6d\x31\xe6\xca\x16\x31\x97\xba\xcc\xe2\x3c\xb4\x52\xcc\x75\x71\xbe\xbc\x60\xd2\xcc\xd9\x45\xb4\x10\x2a\xeb\x43\x82\x76\x29\x38\xde\x31\x63\x84\xca\xa3\x02\x89\x65\x8c\x58\x3f\x02\x50\xac\x40\xef\x4a\xf9\x52\xa3\xdd\xef\xaf\xb1\x73\x86\x71\x6f\xac\x6c\x71\xe6\x2a\x47\x58\x44\x9e\xdd\xda\xcd\xb0\x75\x13\x01\x2c\x5b\x98\xcb\x8b\x29
\x12\xbb\x88\x00\x5c\x0d\xe3\x83\x76\x34\x0e\xf6\x27\xeb\xb8\x75\x30\x66\x84\xf3\x01\x4f\x22\x00\x8b\x4e\x97\x96\xa3\xf3\xfe\x01\xce\x9a\x18\xb5\x61\xfa\xb2\x32\xe5\xb2\x74\x14\x42\xfa\x5f\x4d\xf8\x05\xc8\xb0\x33\x29\xb2\x09\x16\x46\x32\xf2\xb1\x9f\x9e\x8c\xd5\x3f\x23\xa7\xe7\xe7\xf3\xa7\x27\xa9\x79\x48\x7b\xf8\xf0\x91\x9e\x9f\x4f\x76\x56\x0d\x99\x7a\x8b\x8f\x0e\xb3\x89\xbe\x63\xc4\xe7\x0f\x0d\xc4\x9a\x0d\xd9\x12\x9b\x25\x2d\xf6\xc1\x92\x09\xe9\xbb\x7a\xa4\x06\xce\x21\x8d\xd4\x12\x15\x69\x5b\x75\xac\x45\xae\xb4\xc5\xec\x9d\x40\x99\x35\x6c\x1b\xc6\x3a\xc3\xd4\x68\x2d\x9b\xb1\xb6\x70\x4d\x21\xd7\xa6\x75\x6a\xfc\xdf\x97\x21\xc9\xa6\x28\x5d\xff\x05\x4a\x5a\x0f\x6c\x01\x1c\x5d\xad\x9d\x10\xb3\x39\x52\x40\xd1\xf1\x95\x09\x8b\x9c\xc4\x12\x3b\xd8\x2c\x16\x7a\x89\x69\x86\x33\x56\x4a\x4a\x77\xa0\x96\xe4\x79\xbf\x2d\xe9\x51\x59\x64\x99\xff\xd8\x25\x58\x08\x95\x16\xcc\x57\x28\x6d\xfa\xa5\x99\x7c\x29\x6f\xc7\x9c\x2a\x83\x7d\x68\xca\xf6\x32\x0e\x40\xb3\x06\x75\x77\xaa\x25\xf9\x80\x33\xb4\xa8\x78\x97\xc0\xcb\xa2\xa6\x81\x52\x56\x92\x76\x9c\x49\xa1\xf2\xd8\xff\x9f\x1a\xab\x97\xc2\x83\x12\x2a\x6f\x89\xba\xb8\xe9\xe2\x94\x71\xae\x4b\xb5\x09\x63\x81\x55\xbf\x6d\xf2\x41\x3d\xfb\x80\xb3\x0d\x83\x7c\xb9\xe8\x6f\x7c\xb6\xdd\x3a\x1a\xdc\x25\x9d\x55\x1d\x9b\x3d\x1b\x69\xc3\xa3\xd5\xa5\xe9\x83\x60\xc5\xab\x3b\x77\x6f\x79\xb1\x60\x42\xee\x4b\x86\x42\x5a\x69\xbb\xd8\x66\xd5\x0c\x1f\x44\x67\xa8\x0b\x53\x12\x8e\x77\x3c\x1d\xc6\x85\xd7\xcb\x8f\xe3\xe3\x50\xce\x52\x29\xd4\x62\x1f\x27\x57\x4e\x5f\xa1\xb5\x9e\x39\x86\x59\xb2\xcf\xdf\xdf\x45\x2e\x6c\x3c\xae\xd5\x4c\xe4\xff\xf4\xd6\xdc\x60\x3a\xd5\x9a\xd2\x4c\xb8\x45\xba\x28\x5c\xba\xc0\x6a\x9b\xab\x37\xb8\x12\x6e\x71\x73\x97\x0c\x6d\x65\x48\xdf\x60\x75\x10\xe3\xcd\x05\x47\x93\x5d\x14\xee\x4f\x2d\x2d\x79\xe5\x10\xee\xa5\x96\xb8\x29\xa7\xae\x9c\xc6\xa4\x8d\xe0\x1b\x4e\x33\x74\xdc\x0a\x43\x01\xe0\xaf\x67\x1d\x6c\x93\x39\xc2\xc7\x72\x9a\x94\xd3\x89\x5f\xe5\x25\x95\x43\x95\x05\x65\xb1\x19\x02\x48\xc7\xdb\x69\x0c\x71\x0e\xca\xdb\x46\x84\xa3\xd3\xd6\x90\xfa
\x5a\xe6\x96\x4c\x96\xb8\x71\x83\x37\xa7\xbd\x3b\xdf\xbc\xca\x03\x62\x3f\x14\xac\xd7\xb7\xf8\xab\xad\x14\xfe\x0f\x40\x0e\xcc\xe7\xa8\x68\x2e\xb7\x18\x12\x24\xaf\x4b\x5b\x91\x8d\x32\x83\x95\x90\x12\x98\x73\x22\x57\x20\x94\x23\xaf\xb2\x5d\x67\x7d\x10\xd2\xc2\x81\xbf\x2d\x7d\x31\x6c\xa9\xbc\x3e\xdd\x10\x8b\x98\x05\x84\x75\x7a\x62\x98\x78\x6b\xe1\xbc\x9c\x9f\x95\xb2\xe3\xcb\xab\x4c\x5b\xaa\xa0\x26\xfd\x81\x24\x35\xcb\x9c\xf7\xe6\xb4\x44\x20\x54\x4c\x51\xf0\xe5\x76\x4a\xeb\x47\xdf\xfb\x00\x47\x1d\xea\xed\xa2\xbf\xe8\xe4\xfb\xf6\x6b\xf9\x98\x13\x61\xe7\x2c\x38\xb0\xdc\x7e\x27\x0d\x4b\x47\xba\x40\x0b\x77\x4c\xb1\x1c\x33\xb8\x56\xdc\x9f\x17\x7e\x03\xdd\x60\xe5\x8b\x14\x5e\x2d\x58\x0f\xfb\xb2\x76\xdc\x79\x18\xfe\x69\xb2\x00\x46\xe4\xdf\x4a\xb5\x35\xe3\xf3\xba\xe6\x42\x35\x1b\x33\xc3\xd0\x21\xf1\xf7\x7d\x64\xbd\x2a\xcb\xb7\x05\xe1\x96\x30\xf7\x8d\xf4\x71\x3d\x7b\xa8\x32\x6f\x9a\x62\x8f\x4a\xef\x41\xa6\xd1\xa9\xff\x90\x7f\x94\x69\x4b\xfe\x79\xe4\x75\xab\xac\x8e\x94\xf1\x33\x26\xdd\xb7\xe9\xf8\xc3\xe5\xf9\x31\xf2\xfb\xf7\x55\x6c\x68\xed\x26\x2b\xdd\x9e\xde\xde\x48\x87\xec\xf1\xbd\xaf\xa6\xc3\x7b\xe9\xeb\xcf\xc8\xf6\x67\x98\x45\x45\x9d\xec\xfd\x50\x35\xff\x16\x55\xf3\xe3\xee\xfd\x0e\xef\xde\x3f\xf8\xe2\xfe\x2d\x00\x00\xff\xff\xef\x0c\xbd\x53\xd9\x14\x00\x00"), }, "/dataflow.yaml": &vfsgen۰CompressedFileInfo{ name: "dataflow.yaml", @@ -139,16 +139,16 @@ var Assets = func() http.FileSystem { "/pubsub.yaml": &vfsgen۰CompressedFileInfo{ name: "pubsub.yaml", modTime: time.Time{}, - uncompressedSize: 4132, + uncompressedSize: 4471, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\x4f\x73\xdb\xb6\x12\xbf\xf3\x53\xec\x58\x07\x27\x33\x16\x15\xfb\xa8\x77\x79\x8a\xec\xbc\xe8\xd9\x96\x33\xa2\xdc\x4c\x4e\x9a\x25\xb8\x22\x51\x81\x00\x0a\x80\x52\x38\xae\xbf\x7b\x07\xfc\x23\x53\xb2\x5c\xcb\xcd\x34\xad\x2e\xe6\x00\xbb\xbf\xfd\xf7\xdb\x5d\xb8\x07\x63\xa5\x4b\xc3\xd3\xcc\xc1\xc5\x87\x8b\x0b\xf8\x9f\x52\xa9\x20\xb8\xb9\x19\x07\xbd\xa0\x07\x37\x9c\x91\xb4\x94\x40\x21\x13\x32\xe0\x32\x82\x91\x46\x96\x51\x7b\x73\x06\xbf\x90\xb1\x5c\x49\xb8\x08\x3f\xc0\x3b\x2f\x70\xd2\x5c\x9d\xbc\xff\x4f\xd0\x83\x52\x15\x90\x63\x09\x52\x39\x28\x2c\x81\xcb\xb8\x85\x25\x17\x04\xf4\x9d\x91\x76\xc0\x25\x30\x95\x6b\xc1\x51\x32\x82\x0d\x77\x59\x65\xa6\x01\x09\x83\x1e\x7c\x6b\x20\x54\xec\x90\x4b\x40\x60\x4a\x97\xa0\x96\x5d\x39\x40\x57\x39\x5c\xfd\x32\xe7\xf4\x70\x30\xd8\x6c\x36\x21\x56\xde\x86\xca\xa4\x03\x51\x4b\xda\xc1\xcd\x64\x7c\x35\x8d\xae\xfa\x17\xe1\x87\x4a\xe7\x5e\x0a\xb2\x16\x0c\xfd\x56\x70\x43\x09\xc4\x25\xa0\xd6\x82\x33\x8c\x05\x81\xc0\x0d\x28\x03\x98\x1a\xa2\x04\x9c\xf2\x0e\x6f\x0c\x77\x5c\xa6\x67\x60\xd5\xd2\x6d\xd0\x50\xd0\x83\x84\x5b\x67\x78\x5c\xb8\x9d\x6c\xb5\xee\x71\xbb\x23\xa0\x24\xa0\x84\x93\x51\x04\x93\xe8\x04\x3e\x8e\xa2\x49\x74\x16\xf4\xe0\xeb\x64\xfe\xf9\xee\x7e\x0e\x5f\x47\xb3\xd9\x68\x3a\x9f\x5c\x45\x70\x37\x83\xf1\xdd\xf4\x72\x32\x9f\xdc\x4d\x23\xb8\xfb\x04\xa3\xe9\x37\xb8\x9e\x4c\x2f\xcf\x80\xb8\xcb\xc8\x00\x7d\xd7\xc6\xfb\xaf\x0c\x70\x9f\x47\x4a\x7c\xd2\x22\xa2\x1d\x07\x96\xaa\x76\xc8\x6a\x62\x7c\xc9\x19\x08\x94\x69\x81\x29\x41\xaa\xd6\x64\x24\x97\x29\x68\x32\x39\xb7\xbe\x9a\x16\x50\x26\x41\x0f\x04\xcf\xb9\x43\x57\x9d\x3c\x0b\x2a\x0c\x02\xd4\xbc\xa9\xff\x10\x98\x32\x14\x32\x69\xf2\x90\x09\x55\x24\x61\x5a\x51\x29\x64\x2a\x1f\xac\xcf\x51\xe8\x0c\xcf\x83\x15\x97\xc9\x10\x22\x32\x6b\xce\xe8\x16\xb5\xe6\x32\x0d\x72\x72\x98\xa0\xc3\x61\x00\x20\x31\xa7\x21\xe8\x22\xb6\x45\x7c\x18\xac\x11\xb2\x1a\x19\x0d\xc1\x8b\xf4\x6d\x69\x1d\xe5\x81\x0f\xed\x09\xe3\x4b\x11\x47\x45\x1c\x00\xac\x5b\x07\xd7\xe7\x31\x39\x3c
\x0f\x00\x6c\xed\xc0\x67\x65\xdd\xb4\x12\x3e\x69\x2c\xd6\x66\x50\x73\xeb\x4d\x9d\x04\x00\x86\xac\x2a\x0c\x23\xeb\x91\x01\xfa\x0d\x7a\x2d\xb8\xa8\xd5\x16\x96\x65\x94\x63\x50\xb3\xaf\x8e\xb1\x36\x1f\x75\x2f\x5a\xa8\xc9\xe5\xb0\x39\x01\x70\x68\x52\x72\x9f\x38\x89\x64\x58\x41\x37\x37\x3c\x99\x53\xae\x05\xba\xca\x39\xa3\x7e\x25\xe6\xec\xe0\xe1\xa1\xf9\x7c\x7c\x1c\xd4\x36\xfd\x99\x57\x7b\x7c\x3c\x79\xa6\x39\x46\xf9\x91\xee\x2d\x25\x73\x75\x8b\x8e\x65\xb3\xc6\x7e\x1d\xb2\x33\x05\xed\x39\x36\x5a\x23\x17\x9e\xf4\x13\x39\xb2\x96\xdc\x44\xae\x49\x3a\x65\xca\x21\x2c\x51\xd8\x56\xbc\xad\x57\x53\xbf\xa7\x60\xea\xd4\x74\xa2\xc8\x38\x19\x34\x2c\xe3\x0c\xc5\x8c\x96\x64\x48\x6e\x33\x59\x67\xd3\x95\xda\xd7\xbb\x8e\x6a\x7b\x0e\xb0\xa2\x72\x7b\x3c\xa3\xe5\x9e\xa3\x2f\x60\x2d\x9b\x3c\x1e\x0b\xe7\x7f\x09\x59\x66\xb8\x76\x15\x43\x7e\xef\x77\x6e\x00\xe6\x19\xb5\x4a\xe0\x32\x74\xf5\xf0\x6a\x9d\x80\x98\x84\x92\xa9\x05\xa7\xc2\x8e\x5a\xba\x5e\x0d\x77\x50\x1a\x3e\x3c\xf3\xe9\x20\x33\x3b\x38\x46\x15\x7a\xb8\xb5\x96\xa3\xc4\x94\xcc\x8b\x3d\xf1\x22\x35\x8b\x78\x1b\xe0\x21\x82\x3e\xbf\xe6\x98\x8f\x95\x5c\xf2\x4e\x61\xb5\x12\x9c\x95\xd3\x57\xe0\x17\x1c\xf3\x45\x2d\xba\xa7\x79\x4b\x79\x4c\xe6\x28\xfd\xbc\x12\xdd\xea\x9b\xb6\xd2\x75\x69\x3b\x19\xaa\x83\x3d\x10\x9e\xff\xd5\xb4\xea\x30\xf1\x78\xce\x02\x08\x8c\x49\xd8\x61\xf3\xf7\xef\x69\xde\x8e\xdb\x3f\xab\x85\x0f\x48\x1f\xee\xa3\xaa\x59\x9c\xd2\x9c\xed\xb6\xca\xb6\xbf\xaa\xbb\x23\x7b\x68\x6b\xc2\xaf\x4e\x6c\x58\x37\xf7\x00\xdd\xa6\x59\xa3\x28\xe8\xb5\xa4\x55\x66\xfd\x51\x25\xfd\x94\x2e\xf8\x93\xae\x7b\x32\xf7\xe6\xce\x7b\x65\x09\xb5\xd9\xda\x66\x25\x21\x4c\x16\x82\x9c\x23\xd3\x74\x41\xd8\x3d\xda\x4f\x5a\x95\x65\x2f\x70\x53\xdd\xcf\x9f\xe7\xfb\x9f\x8a\xe9\x07\xcb\xc1\x94\xf4\xef\x34\x32\x47\x0d\xfa\x83\x43\xfb\xf0\x28\xeb\x66\xf0\xa5\x54\xbc\x69\x78\x55\x80\x7f\x65\x6a\x3d\x29\xbe\x71\x5c\xed\x93\xe0\xd9\x9c\x3a\x72\xc3\xad\x72\xbb\x58\x51\xb9\xd8\x99\x5b\xaf\x2f\xb3\xeb\xdb\x68\x6c\x4a\xed\xd4\x35\x95\xbe\x1f\x63\xf2\x6f\xf2\xea\x55\xab\x8d\x72\x7e\xcf
\x21\x63\xfe\x31\xe9\x14\xe4\x64\x2d\xa6\x64\x3d\x69\x04\xb7\x19\x25\x3b\x78\x4a\xd6\xeb\xb0\x0a\x29\xf4\x8f\x74\xd3\xd6\xf0\xd4\xfa\xca\x0c\xa2\x22\x6e\x5f\x5a\x1e\x56\x15\x72\x77\xfd\xbd\x3b\x6d\x6e\xfb\x0f\x0f\x5f\x66\x77\xff\xbf\x1a\xcf\x17\xd3\xfb\xdb\x8f\x57\xb3\xc7\xc7\xff\xa6\x4c\xf7\x2d\xf6\x1b\xc2\x72\xcc\xc3\xb4\x91\x6e\xa0\x3c\x65\x4f\xdf\xef\x00\xe6\x85\x75\x90\xe1\x9a\xe0\xd4\x28\x41\x76\x50\x51\x7c\x95\xdb\x90\xb5\x51\x5f\xc9\xea\x93\xcc\x25\x35\x1f\xa7\x3e\xd6\xf6\x3f\x93\x1d\xb8\x25\xa1\x2b\x0c\x85\xfb\x4d\xbb\xca\xed\x35\x95\x47\xf5\x6a\x37\xe1\x6f\x6e\xd6\xca\xf1\x57\x3a\x75\x67\x05\x59\x12\xcb\x85\xe0\x72\x75\x88\x32\xf5\x7b\x71\x61\xc9\xf9\x7f\x5d\x6c\xb8\xf3\x66\xdd\xc6\x56\x9f\xbe\x61\x0e\x45\xfb\x30\x3f\x7f\x10\x3d\xbd\x84\xf7\x26\xd1\xbf\x6c\xe9\x6f\x07\xe6\x4f\xde\xf6\x3f\x38\x90\xff\x08\x00\x00\xff\xff\x44\xfd\x63\xb1\x24\x10\x00\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\x4f\x73\xe2\x36\x14\xbf\xfb\x53\xbc\x81\x43\x76\x67\x82\xd9\xe4\x48\x2f\x25\x24\xdb\xa5\x49\xc8\x16\x93\xee\xec\x89\x91\xe5\x87\xad\x22\x4b\x5a\x49\x86\xf5\xa4\xf9\xee\x1d\xc9\x36\x31\x84\x34\xd0\x9d\xa6\xe5\x12\x47\x7a\x7f\x7f\xef\xaf\xba\x30\x92\xaa\xd4\x2c\xcd\x2c\x9c\x7f\x38\x3f\x87\x5f\xa4\x4c\x39\xc2\xcd\xcd\x28\xe8\x06\x5d\xb8\x61\x14\x85\xc1\x04\x0a\x91\xa0\x06\x9b\x21\x0c\x15\xa1\x19\x36\x37\xa7\xf0\x3b\x6a\xc3\xa4\x80\xf3\xf0\x03\xbc\x73\x04\x9d\xfa\xaa\xf3\xfe\xa7\xa0\x0b\xa5\x2c\x20\x27\x25\x08\x69\xa1\x30\x08\x36\x63\x06\x16\x8c\x23\xe0\x77\x8a\xca\x02\x13\x40\x65\xae\x38\x23\x82\x22\xac\x99\xcd\xbc\x9a\x5a\x48\x18\x74\xe1\x6b\x2d\x42\xc6\x96\x30\x01\x04\xa8\x54\x25\xc8\x45\x9b\x0e\x88\xf5\x06\xfb\x5f\x66\xad\x1a\xf4\xfb\xeb\xf5\x3a\x24\xde\xda\x50\xea\xb4\xcf\x2b\x4a\xd3\xbf\x19\x8f\xae\x26\xd1\x55\xef\x3c\xfc\xe0\x79\xee\x05\x47\x63\x40\xe3\xb7\x82\x69\x4c\x20\x2e\x81\x28\xc5\x19\x25\x31\x47\xe0\x64\x0d\x52\x03\x49\x35\x62\x02\x56\x3a\x83\xd7\x9a\x59\x26\xd2\x53\x30\x72\x61\xd7\x44\x63\xd0\x85\x84\x19\xab\x59\x5c\xd
8\x2d\xb4\x1a\xf3\x98\xd9\x22\x90\x02\x88\x80\xce\x30\x82\x71\xd4\x81\x8b\x61\x34\x8e\x4e\x83\x2e\x7c\x19\xcf\x3e\xdd\xdd\xcf\xe0\xcb\x70\x3a\x1d\x4e\x66\xe3\xab\x08\xee\xa6\x30\xba\x9b\x5c\x8e\x67\xe3\xbb\x49\x04\x77\x1f\x61\x38\xf9\x0a\xd7\xe3\xc9\xe5\x29\x20\xb3\x19\x6a\xc0\xef\x4a\x3b\xfb\xa5\x06\xe6\x70\xc4\xc4\x81\x16\x21\x6e\x19\xb0\x90\x95\x41\x46\x21\x65\x0b\x46\x81\x13\x91\x16\x24\x45\x48\xe5\x0a\xb5\x60\x22\x05\x85\x3a\x67\xc6\x45\xd3\x00\x11\x49\xd0\x05\xce\x72\x66\x89\xf5\x27\xcf\x9c\x0a\x83\x80\x28\x56\xc7\x7f\x00\x54\x6a\x0c\xa9\xd0\x79\x48\xb9\x2c\x92\x30\xf5\xa9\x14\x52\x99\xf7\x57\x67\x84\xab\x8c\x9c\x05\x4b\x26\x92\x01\x44\xa8\x57\x8c\xe2\x2d\x51\x8a\x89\x34\xc8\xd1\x92\x84\x58\x32\x08\x00\x04\xc9\x71\x00\xaa\x88\x4d\x11\xef\x17\x56\x13\x19\x45\x28\x0e\xc0\x91\xf4\x4c\x69\x2c\xe6\x81\x73\xed\x49\xc6\xe7\x22\x8e\x8a\x38\x00\x58\x35\x06\xae\xce\x62\xb4\xe4\x2c\x00\x30\x95\x01\x9f\xa4\xb1\x13\x4f\xdc\xa9\x35\x56\x6a\x88\x62\xc6\xa9\xea\x04\x00\x1a\x8d\x2c\x34\x45\xe3\x24\x03\xf4\x6a\xe9\x15\xe1\xbc\x62\x9b\x1b\x9a\x61\x4e\x82\x2a\xfb\x2a\x1f\x2b\xf5\x51\xfb\xa2\x11\x35\xbe\x1c\xd4\x27\x00\x96\xe8\x14\xed\x47\x86\x3c\x19\x78\xd1\xf5\x0d\x4b\x66\x98\x2b\x4e\xac\x37\x4e\xcb\x3f\x90\x5a\xd3\x7f\x78\xa8\x3f\x1f\x1f\xfb\x95\x4e\x77\xe6\xd8\x1e\x1f\x3b\xcf\x38\x47\x44\x5c\xe0\xbd\xc1\x64\x26\x6f\x89\xa5\xd9\xb4\xd6\x5f\xb9\x6c\x75\x81\x3b\x86\x0d\x57\x84\x71\x97\xf4\x63\x31\x34\x06\xed\x58\xac\x50\x58\xa9\xcb\x01\x2c\x08\x37\x0d\x79\x13\xaf\x3a\x7e\x4f\xce\x54\xd0\xb4\xbc\xc8\x18\x6a\xa2\x69\xc6\x28\xe1\x53\x5c\xa0\x46\xb1\x41\xb2\x42\xd3\x96\xca\xc5\xbb\xf2\x6a\x73\x0e\xb0\xc4\x72\x73\x3c\xc5\xc5\x8e\xa1\x2f\xc8\x5a\xd4\x38\x1e\x2a\xce\xfd\x12\x34\x54\x33\x65\x7d\x86\xfc\xd9\x6b\xdd\x00\xcc\x32\x6c\x98\xc0\x66\xc4\x56\xcd\xab\x31\x02\x62\xe4\x52\xa4\x06\xac\x0c\x5b\x6c\xe9\x6a\x39\xd8\x92\x52\xe7\xc3\x33\x9b\xf6\x66\x66\x4b\x8e\x96\x85\x1a\x6c\xb4\xe5\x44\x90\x14\xf5\x8b\x35\xf1\x62\x6a\x16\xf1\xc6\xc1\x7d\x09\xfa\xfc\x9a\x91\x7c\x24\xc5\x82\xb5\x02\xab\x2
4\x67\xb4\x9c\xbc\x22\x7e\xce\x48\x3e\xaf\x48\x77\x38\x6f\x31\x8f\x51\x1f\xc4\x9f\x7b\xd2\x0d\xbf\x6e\x22\x5d\x85\xb6\x85\x50\xe5\xec\x1e\xf7\xdc\xaf\x4a\xab\x56\x26\x1e\x9e\xb3\x00\x9c\xc4\xc8\xcd\xa0\xfe\xfb\xef\x14\x6f\xcb\xec\xb7\x2a\xe1\x3d\xd4\xfb\xeb\xc8\x17\x8b\x95\x8a\xd1\xed\x52\xd9\xd4\x97\xbf\x3b\xb0\x86\x36\x2a\xdc\xe8\x24\x75\xd6\xcd\x9c\x80\x76\xd1\xac\x08\x2f\xf0\x35\xd0\xbc\x5a\x77\xe4\xa9\x9f\xe0\x82\xbf\xa9\xba\x27\x75\x47\x57\xde\x2b\x43\xa8\x41\x6b\x83\x4a\x82\x24\x99\x73\xb4\x16\x75\x5d\x05\x61\xfb\x68\x17\x34\x8f\xb2\x23\xb8\xf1\xf7\xb3\xe7\x78\xff\x57\x3e\xfd\x70\x38\x5a\xa0\xc4\x2c\xfd\x56\xa0\x2e\xe7\xd4\xb7\x94\xd0\xba\xdc\xdc\x45\xc1\x1f\x1e\xd7\x96\x5d\xc1\x34\x2b\xa0\x67\x77\xe9\xb5\xce\x18\xcd\xfc\x87\x66\x16\xc1\xd5\xfb\x01\x9d\xf9\x82\xa5\xbf\x39\x13\x67\x3b\xa6\x1d\x86\x68\xe3\xe0\xd1\x98\xb6\xa0\x0c\x1f\x1e\x9c\xad\x06\xed\x9c\x25\xfe\xdf\x1d\x48\xa9\x14\x6e\xf5\x45\x7d\xd0\xec\xdc\x3b\x07\xf7\x4f\x87\x76\x52\xbe\x94\x5d\x47\xcd\x03\x2f\xf0\x9f\x0c\x82\x27\xc6\x23\x27\xc0\x6e\x5d\x3d\x6b\xfd\x07\x2e\x0d\xcb\xdc\xcc\x97\x58\xce\xb7\x46\xc1\xeb\x89\x78\x7d\x1b\x8d\x74\xa9\xac\xbc\xc6\xd2\xa5\x5e\x8c\xee\x99\xe3\x1f\x0a\x4a\x4b\xeb\x56\x07\x42\xa9\xdb\xcf\xad\x84\x1c\x8d\x21\x29\x1a\x57\x87\x9c\x99\x0c\x93\x2d\x79\x52\x54\x1b\x86\x77\x29\x74\xef\x1e\xdd\xc4\xf0\xc4\xb8\xc8\xf4\xa3\x22\x6e\x96\x57\x27\x56\x16\x62\x7b\xa3\x78\x77\x52\xdf\xf6\x1e\x1e\x3e\x4f\xef\x7e\xbd\x1a\xcd\xe6\x93\xfb\xdb\x8b\xab\xe9\xe3\xe3\xcf\x29\x55\x3d\x43\x7a\x75\x0f\x60\x24\x0f\xd3\x9a\xba\x16\xe5\x32\xf6\xe4\xfd\x96\xc0\xbc\x30\x16\x32\xb2\x42\x38\xd1\x92\xa3\xe9\xfb\x0c\x5f\xe6\x26\xa4\x8d\xd7\x57\xc2\x7f\xa2\xbe\xc4\xfa\xe3\xc4\xf9\xda\x3c\xf6\xb6\xc4\x2d\x90\xd8\x42\x63\xb8\xdb\x01\x96\xb9\xb9\xc6\xf2\xa0\xf6\xd7\x06\xfc\xe8\x6a\xf5\x86\xbf\x52\xa8\x5b\x53\xdd\x20\x5f\xcc\x39\x13\xcb\x7d\x29\x53\xad\xe0\x73\x83\xd6\xbd\x06\x4d\xb8\xf5\x0c\xd8\xf8\x56\x9d\x1e\xd1\xda\xa3\x5d\x31\x6f\xdf\xdb\x9f\x1e\x17\x3b\x9d\xe8\x7f\xb6\x47\x6d\x66\xd0\x1b\x2f\x50\x3f\xd
8\x90\xff\x0a\x00\x00\xff\xff\x39\xac\xfc\x07\x77\x11\x00\x00"), }, "/redis.yaml": &vfsgen۰CompressedFileInfo{ name: "redis.yaml", modTime: time.Time{}, - uncompressedSize: 1609, + uncompressedSize: 2056, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x54\x4d\x73\xdb\x36\x10\xbd\xf3\x57\xec\x48\x97\x76\xc6\xa6\x6c\x1d\xd9\x93\x22\x3b\x0d\xa7\x8e\x34\x23\xc9\xcd\xe4\xe4\x81\x80\x25\xb9\x35\x08\xa0\xc0\x52\x8c\xea\xfa\xbf\x77\xc0\x0f\x47\xac\x13\x1d\x24\x0d\xf6\xed\x7b\x6f\x3f\x80\x39\xac\xad\x3b\x7b\x2a\x2b\x86\xe5\xcd\x72\x09\xbf\x5b\x5b\x6a\x84\x87\x87\x75\x32\x4f\xe6\xf0\x40\x12\x4d\x40\x05\x8d\x51\xe8\x81\x2b\x84\x95\x13\xb2\xc2\x31\x72\x05\x7f\xa2\x0f\x64\x0d\x2c\xd3\x1b\xf8\x25\x02\x66\x43\x68\xf6\xeb\x6f\xc9\x1c\xce\xb6\x81\x5a\x9c\xc1\x58\x86\x26\x20\x70\x45\x01\x0a\xd2\x08\xf8\x4d\xa2\x63\x20\x03\xd2\xd6\x4e\x93\x30\x12\xa1\x25\xae\x3a\x99\x81\x24\x4d\xe6\xf0\x75\xa0\xb0\x47\x16\x64\x40\x80\xb4\xee\x0c\xb6\xb8\xc4\x81\xe0\xce\x70\xf7\xa9\x98\x5d\xb6\x58\xb4\x6d\x9b\x8a\xce\x6d\x6a\x7d\xb9\xd0\x3d\x32\x2c\x1e\xf2\xf5\xfd\x66\x7f\x7f\xbd\x4c\x6f\xba\x9c\x47\xa3\x31\x04\xf0\xf8\x77\x43\x1e\x15\x1c\xcf\x20\x9c\xd3\x24\xc5\x51\x23\x68\xd1\x82\xf5\x20\x4a\x8f\xa8\x80\x6d\x34\xdc\x7a\x62\x32\xe5\x15\x04\x5b\x70\x2b\x3c\x26\x73\x50\x14\xd8\xd3\xb1\xe1\x49\xb7\x46\x7b\x14\x26\x00\x6b\x40\x18\x98\xad\xf6\x90\xef\x67\xf0\x61\xb5\xcf\xf7\x57\xc9\x1c\xbe\xe4\x87\x4f\xdb\xc7\x03\x7c\x59\xed\x76\xab\xcd\x21\xbf\xdf\xc3\x76\x07\xeb\xed\xe6\x2e\x3f\xe4\xdb\xcd\x1e\xb6\x1f\x61\xb5\xf9\x0a\x7f\xe4\x9b\xbb\x2b\x40\xe2\x0a\x3d\xe0\x37\xe7\xa3\x7f\xeb\x81\x62\x1f\x51\xc5\xa6\xed\x11\x27\x06\x0a\xdb\x1b\x0a\x0e\x25\x15\x24\x41\x0b\x53\x36\xa2\x44\x28\xed\x09\xbd\x21\x53\x82\x43\x5f\x53\x88\xd3\x0c\x20\x8c\x4a\xe6\xa0\xa9\x26\x16\xdc\x9d\xbc\x2b\x2a\x4d\x12\xe1\x68\x98\x7f\x06\xd2\x7a\x4c\xa5\xf1\x75\x2a\xb5\x6d\x54\x5a\x76\xab\x94\x4a\x5b\x2f\x4e\xb7\x42\xbb\x4a\xdc\x26\xcf\x64\x54\x06\x7b\xf4\x27\x92\xf8\x59\x38\x47\xa6\x4c\x6a\x64\xa1\x04\x8b\x2c\x01\x
30\xa2\xc6\x0c\x3c\x2a\x0a\x3f\xe6\x1a\x30\xc1\x09\x89\x19\x44\xc8\x75\x38\x07\xc6\x3a\x89\x95\x7d\xa7\xd8\x45\x8a\x04\xe0\x34\xda\x3b\xdd\x1e\x91\xc5\x6d\x02\x10\x7a\xf9\x4f\x36\xf0\xa6\xc3\xce\x7a\xbd\x5e\x44\xb8\x28\x6d\xeb\x59\x02\xe0\x31\xd8\xc6\x4b\x0c\x91\x17\xe0\x7a\xe0\xee\x81\x4f\x5d\xd6\x13\x99\xc0\x71\x75\x93\x7e\xf7\xfa\x0a\x3b\xf5\x7c\x1a\x21\x75\xc0\xda\x69\xc1\x51\xf1\xe5\xc5\x79\xfb\x17\x4a\x7e\x7d\x5d\xbc\xbc\x78\x2c\xc9\x9a\xee\x6f\x54\x78\x7d\x9d\xbd\xcb\x59\x0b\xf3\x01\x1f\x03\xaa\x83\xfd\x2c\x58\x56\xbb\xc1\x5a\x5f\x01\xfb\x66\x94\x19\x3d\xaf\x4e\x82\x74\xdc\xe0\xdc\xac\x42\x40\xce\xcd\x09\x0d\x5b\x7f\x9e\xa0\xc7\xde\x0f\xb3\xc8\x86\xe3\xb1\x8b\xf1\xfb\xed\x48\x8b\x23\xea\x90\x0d\xbf\xff\x93\xcb\xef\xbe\xe7\xb2\xf0\x25\xf2\x47\x42\xad\x26\x14\x23\x76\x87\x05\x7a\x34\x6f\x7d\xed\x7b\xcb\xc5\x90\x21\x1a\xae\xac\xa7\x7f\x50\x3d\x19\xe4\xd6\xfa\xe7\x37\x14\x80\xc2\x20\x3d\x39\xee\x66\xfa\xef\xf5\x45\x04\xe0\x50\x21\x0c\x19\xf1\xa2\xb6\x15\xc9\xfe\x31\x19\x67\x14\xaf\xa1\xb4\xc6\xa0\x64\x54\x29\xe4\x05\x68\x2c\x78\xc2\xd1\x98\xe1\x86\xa0\xba\xea\x72\x15\x16\xa2\xd1\xfc\x46\xdc\x92\xd6\x70\xc4\xf8\x98\xa9\xf4\x22\xf5\x19\xcf\x97\xd6\x37\x3d\x7c\x87\xc5\x05\xa6\x3c\x3d\x67\x13\xb5\x7e\x5b\xd6\xb6\x76\x0d\xe3\xe6\x5d\xb1\x3f\x5c\xdf\x0b\x3a\x6f\x1b\x97\x75\xcf\x67\xc3\x3f\xb9\x7b\x17\x09\x93\xb1\x04\xd4\xc5\x93\x26\x33\xca\x49\x6b\xe2\xeb\x8a\x7e\x3a\x93\xb3\xc3\x0c\x86\x55\xbd\xa4\x1a\x67\x35\x86\xfe\x0b\x00\x00\xff\xff\xde\xd2\xe5\x62\x49\x06\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x55\x4f\x73\xe2\xb8\x13\xbd\xfb\x53\x74\xc1\xe5\xf7\xab\x4a\x4c\x92\x23\x7b\x62\x48\x66\xc7\x45\x80\x2a\x20\x3b\x35\x27\xaa\x91\xdb\xb6\x16\x59\xd2\x4a\x6d\x18\x6f\x36\xdf\x7d\x4b\xfe\x93\xc1\x93\x64\x6b\x38\x00\x25\xbd\x7e\xfd\xba\x5f\x4b\x1a\xc3\xdc\xd8\xda\xc9\xbc\x60\xb8\xbb\xb9\xbb\x83\xdf\x8d\xc9\x15\xc1\xe3\xe3\x3c\x1a\x47\x63\x78\x94\x82\xb4\xa7\x14\x2a\x9d\x92\x03\x2e\x08\x66\x16\x45\x41\xfd\xce\x15\xfc\x41\xce\x4b\xa3\xe1\x2e\xbe\x81\xff\x05\xc0\xa8\xdb\x1a\xfd\xff\xb7\x68\x0c\xb5\xa9\xa0\xc4\x1a\xb4\x61\xa8\x3c\x01\x17\xd2\x43\x26\x15\x01\x7d\x17\x64\x19\xa4\x06\x61\x4a\xab\x24\x6a\x41\x70\x96\x5c\x34\x69\x3a\x92\x38\x1a\xc3\xb7\x8e\xc2\x1c\x18\xa5\x06\x04\x61\x6c\x0d\x26\xbb\xc4\x01\x72\x23\xb8\xf9\x14\xcc\x76\x3a\x99\x9c\xcf\xe7\x18\x1b\xb5\xb1\x71\xf9\x44\xb5\x48\x3f\x79\x4c\xe6\x0f\xab\xed\xc3\xf5\x5d\x7c\xd3\xc4\x3c\x69\x45\xde\x83\xa3\xbf\x2a\xe9\x28\x85\x43\x0d\x68\xad\x92\x02\x0f\x8a\x40\xe1\x19\x8c\x03\xcc\x1d\x51\x0a\x6c\x82\xe0\xb3\x93\x2c\x75\x7e\x05\xde\x64\x7c\x46\x47\xd1\x18\x52\xe9\xd9\xc9\x43\xc5\x83\x6e\xf5\xf2\xa4\x1f\x00\x8c\x06\xd4\x30\x9a\x6d\x21\xd9\x8e\xe0\xd3\x6c\x9b\x6c\xaf\xa2\x31\x7c\x4d\x76\x5f\xd6\x4f\x3b\xf8\x3a\xdb\x6c\x66\xab\x5d\xf2\xb0\x85\xf5\x06\xe6\xeb\xd5\x7d\xb2\x4b\xd6\xab\x2d\xac\x3f\xc3\x6c\xf5\x0d\x16\xc9\xea\xfe\x0a\x48\x72\x41\x0e\xe8\xbb\x75\x41\xbf\x71\x20\x43\x1f\x29\x0d\x4d\xdb\x12\x0d\x04\x64\xa6\x15\xe4\x2d\x09\x99\x49\x01\x0a\x75\x5e\x61\x4e\x90\x9b\x13\x39\x2d\x75\x0e\x96\x5c\x29\x7d\x70\xd3\x03\xea\x34\x1a\x83\x92\xa5\x64\xe4\x66\xe5\x4d\x51\x71\x14\xa1\x95\x9d\xff\x53\x10\xc6\x51\x2c\xb4\x2b\x63\xa1\x4c\x95\xc6\x79\x33\x4a\xb1\x30\xe5\xe4\x74\x8b\xca\x16\x78\x1b\x1d\xa5\x4e\xa7\xb0\x25\x77\x92\x82\x96\x68\xad\xd4\x79\x54\x12\x63\x8a\x8c\xd3\x08\x40\x63\x49\x53\x70\x94\x4a\xff\x3e\x57\x87\xf1\x16\x05\x4d\x21\x40\xae\x7d\xed\x99\xca\x28\x54\xf6\x83\x62\x13\x28\x22\x80\x53\x2f\xef\x74\x7b\x20\xc6\xdb\x08\xc0\xb7\xe9\xbf\x18\xcf\xab\x06\x3b\x6a\xf3\xb5
\x49\xd0\x86\xd4\xa6\x1c\x45\x00\x8e\xbc\xa9\x9c\x20\x1f\x78\x01\xae\x3b\xee\x16\xb8\x6f\xa2\xf6\x52\x7b\x0e\xa3\x1b\xb5\xb3\xd7\x56\xd8\x64\x4f\x86\x3b\x32\xdd\x51\x69\x15\x72\xc8\xf8\xfc\x6c\x9d\xf9\x93\x04\xbf\xbc\x4c\x9e\x9f\x1d\xe5\xd2\xe8\xe6\x6f\xc8\xf0\xf2\x32\x7a\x13\x33\x47\xfd\x89\x9e\x3c\xa5\x3b\xb3\x44\x16\xc5\xa6\x93\xd6\x56\xc0\xae\xea\xd3\xf4\x9a\x67\x27\x94\x2a\x4c\x70\xa2\x67\xde\x13\x27\xfa\x44\x9a\x8d\xab\x07\xe8\xbe\xf7\x9d\x17\xd3\x6e\xb9\xef\x62\xf8\x7e\x5d\x52\x78\x20\xe5\xa7\xdd\xef\x4f\xe9\x92\xfb\x1f\xb1\x8c\x2e\x27\xfe\x2c\x49\xa5\x03\x8a\x1e\xbb\xa1\x8c\x1c\xe9\xd7\xbe\xb6\xbd\xe5\xac\x8b\xc0\x8a\x0b\xe3\xe4\xdf\x94\xee\x35\xf1\xd9\xb8\xe3\x2b\x0a\x20\x25\x2f\x9c\xb4\xdc\x78\xfa\xcf\xf5\xc5\x0e\xc0\xae\x20\xe8\x22\xc2\x41\x3d\x17\x52\xb4\x97\x49\xef\x51\x38\x86\xc2\x68\x4d\x82\x29\x8d\x21\xc9\x40\x51\xc6\x03\x8e\x4a\x77\x27\x84\xd2\xab\x26\x36\xa5\x0c\x2b\xc5\xaf\xc4\x67\xa9\x14\x1c\x28\x5c\x66\x69\x7c\x11\x7a\xa4\xfa\x52\xfa\xaa\x85\x6f\x28\xbb\xc0\xe4\xa7\xe3\x74\x90\xad\x9d\x96\xb9\x29\x6d\xc5\xb4\x7a\x53\xec\xbb\xe3\x7b\x41\xe7\x4c\x65\xa7\xcd\xf5\x59\xf1\x07\x67\xef\x22\x60\x60\x8b\x27\x95\xed\x95\xd4\xc7\xf7\x1c\x10\x95\x67\x53\x92\xdb\x97\xa8\x31\xa7\x74\x7f\xa4\xfa\xe7\x52\x7b\xcc\xb2\x85\x2c\xa8\x1e\xd6\xfa\x5f\x46\x25\x65\x59\x71\x98\xcd\x18\xd6\x0d\x02\x55\xdc\xb8\xb7\x58\x6e\x03\x3b\xb8\x7e\x42\x80\x0b\xe4\xe6\x11\x39\xa3\xe6\xe0\x6a\xf3\x88\x98\x01\x1d\x69\xe1\x6a\xcb\xad\x5d\xc8\x08\xc8\x61\xd6\xb8\xbb\xf4\xa4\x6f\xcf\xe3\xeb\x18\x34\xce\x37\xeb\xd2\x0f\x88\xac\x33\x27\x99\x06\xe7\xe7\xcb\x87\x45\x98\x16\xd2\x41\xe6\xc0\xe8\x0f\x4c\x5c\x2c\xb7\xf3\xa0\xc2\x2c\x06\xbd\xfa\x35\x0b\x8f\xe5\xc7\xd7\xdd\x2f\xd9\x27\x8c\x0e\x8f\x23\xb9\xe1\x91\xaa\x2d\x4d\xa1\xbb\x69\x2e\xa9\x7a\xa3\xfb\xad\x7f\x03\x00\x00\xff\xff\xea\xc6\x01\x63\x08\x08\x00\x00"), }, "/resourcemanager.yaml": &vfsgen۰CompressedFileInfo{ name: "resourcemanager.yaml", @@ -202,9 +202,9 @@ var Assets = func() http.FileSystem { "/sql.yaml": 
&vfsgen۰CompressedFileInfo{ name: "sql.yaml", modTime: time.Time{}, - uncompressedSize: 4179, + uncompressedSize: 4496, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x56\x5f\x6f\xdb\x46\x0c\x7f\xd7\xa7\x20\xec\x87\x6e\x40\x62\x37\xd9\xb0\xa2\xde\xc3\xe0\x3a\xfd\x23\xd4\x75\xd0\x38\x59\xd1\x27\xe3\x7c\xa2\xa5\x9b\x4f\x77\x97\x23\x65\xd7\xc8\xf2\xdd\x87\x93\x25\xdb\x72\x12\x37\x59\x30\x0c\x9b\x1f\x02\x85\xe4\xf1\xf8\x23\x7f\xe4\xb1\x0d\x03\xeb\x56\x5e\xa5\x19\xc3\xe9\xcb\xd3\x53\x78\x6f\x6d\xaa\x11\x86\xc3\x41\xd4\x8e\xda\x30\x54\x12\x0d\x61\x02\x85\x49\xd0\x03\x67\x08\x7d\x27\x64\x86\xb5\xe6\x08\x7e\x47\x4f\xca\x1a\x38\xed\xbc\x84\x1f\x82\x41\xab\x52\xb5\x7e\xfc\x35\x6a\xc3\xca\x16\x90\x8b\x15\x18\xcb\x50\x10\x02\x67\x8a\x60\xa6\x34\x02\x7e\x93\xe8\x18\x94\x01\x69\x73\xa7\x95\x30\x12\x61\xa9\x38\x2b\xaf\xa9\x9c\x74\xa2\x36\x7c\xad\x5c\xd8\x29\x0b\x65\x40\x80\xb4\x6e\x05\x76\xb6\x6b\x07\x82\xcb\x80\xcb\x5f\xc6\xec\x7a\xdd\xee\x72\xb9\xec\x88\x32\xda\x8e\xf5\x69\x57\xaf\x2d\xa9\x3b\x8c\x07\x6f\x47\xe3\xb7\xc7\xa7\x9d\x97\xe5\x99\x2b\xa3\x91\x08\x3c\x5e\x17\xca\x63\x02\xd3\x15\x08\xe7\xb4\x92\x62\xaa\x11\xb4\x58\x82\xf5\x20\x52\x8f\x98\x00\xdb\x10\xf0\xd2\x2b\x56\x26\x3d\x02\xb2\x33\x5e\x0a\x8f\x51\x1b\x12\x45\xec\xd5\xb4\xe0\x46\xb6\xea\xf0\x14\x35\x0c\xac\x01\x61\xa0\xd5\x1f\x43\x3c\x6e\xc1\x9b\xfe\x38\x1e\x1f\x45\x6d\xf8\x12\x5f\x7e\x38\xbf\xba\x84\x2f\xfd\x8b\x8b\xfe\xe8\x32\x7e\x3b\x86\xf3\x0b\x18\x9c\x8f\xce\xe2\xcb\xf8\x7c\x34\x86\xf3\x77\xd0\x1f\x7d\x85\x8f\xf1\xe8\xec\x08\x50\x71\x86\x1e\xf0\x9b\xf3\x21\x7e\xeb\x41\x85\x3c\x62\x12\x92\x36\x46\x6c\x04\x30\xb3\xeb\x80\xc8\xa1\x54\x33\x25\x41\x0b\x93\x16\x22\x45\x48\xed\x02\xbd\x51\x26\x05\x87\x3e\x57\x14\xaa\x49\x20\x4c\x12\xb5\x41\xab\x5c\xb1\xe0\x52\x72\x07\x54\x27\x8a\x84\x53\x55\xfd\x7b\x20\xad\xc7\x8e\x34\x3e\xef\x48\x6d\x8b\xa4\x93\x96\x54\xea\x48\x9b\x77\x17\x27\x42\xbb\x4c\x9c\x44\x73\x65\x92\x1e\x8c\xd1\x2f\x94\xc4\x4f\xc2\x39\x65\xd2\x28\x47\x16\x89\x60\xd1\x8b\x00\x8c\xc8\xb
1\x07\x74\xad\xef\xf7\x54\x59\x90\x13\x12\x7b\x10\x4c\x8e\x69\x45\x8c\x79\x14\x70\x6d\x1d\x8c\x3f\x0f\x23\x80\x45\x1d\xda\xe2\x64\x8a\x2c\x4e\x22\x00\x5a\x5f\xfd\xc1\x12\x8f\x4a\xcb\x16\x5d\x6b\x91\xe4\xca\x54\xb7\x08\xa7\x28\xdc\xd4\x8a\x00\x3c\x92\x2d\xbc\x44\x0a\x8e\x01\x8e\x2b\xe7\x6b\xc3\x09\x5d\xeb\x49\x08\x7b\x2a\x08\xa3\x35\xf1\x2a\x78\x9f\x87\x67\x4d\xb9\x4a\x2e\x31\x77\x5a\x70\xb8\xd0\x79\xfb\x07\x4a\xa6\xee\xcd\x4d\xf5\x79\x7b\xdb\x55\x86\x38\x34\x40\x90\xd6\xdf\xb7\xb7\xdd\xda\x7f\x10\x87\xcb\x6f\x6f\x5b\x77\x5c\x0e\x84\x79\x83\x57\x84\xc9\xa5\xfd\x24\x58\x66\x17\x55\xd4\x6b\x7c\xec\x8b\x3a\x8a\x3a\xcf\x55\xde\x7b\x95\xb8\xce\x59\xf8\x5b\x89\x6a\xe0\xf1\xd9\xd6\x88\x85\x4f\x91\xdf\x29\xd4\xc9\xbd\xb6\x17\x38\x43\x8f\x66\x93\xad\x75\xc6\xe6\xb8\xea\x41\x0d\xe8\x02\x67\x1b\x15\x00\xcf\x2a\x67\xb5\x7a\x47\x97\x20\x49\xaf\x1c\x97\xd5\xfb\xf3\x78\x47\x03\x70\x99\x21\x0c\x02\x2f\x42\xa2\x37\x87\x3b\x3b\x36\xe9\x62\xde\x6b\x1c\xd9\xd4\x25\xbe\x7b\xd5\xbd\x34\xd9\xf1\xe5\x6d\xe1\x0e\x33\xb2\xfe\x39\xe1\xd1\x70\x23\xe5\xd2\x9a\x30\xb3\xd0\x37\x92\xc2\x2b\x87\x3d\xa8\x8a\x7f\x5f\x46\x76\x55\x07\x58\x37\xd9\xcb\xdc\x43\x30\x1f\x5f\x78\x00\x2d\xa6\xa8\xa9\x07\x84\x1c\x46\x1c\x75\x0a\x42\x3f\x59\x4b\xff\x06\x3b\x9e\xc6\xfc\x67\x53\xbc\x0e\xad\xbf\x10\x4a\x87\xf1\x1d\x9b\x3e\x11\x72\x6c\x16\x68\xd8\xfa\xd5\xbd\xd6\x07\xa8\x9b\x0b\x62\xf4\xf1\x61\x02\xaf\x8d\x36\xd5\x98\x34\x12\xfa\x2f\xd1\xf1\x78\x1b\xde\xa6\x94\xca\x4d\xa4\x35\x33\x95\x16\xbe\x9c\xe9\x1d\xe7\xd5\x42\x30\x4e\x0c\xf2\xd2\xfa\xf9\xce\x4d\x25\xf8\x4a\x3d\x5a\x6b\x9b\xe0\x1f\x00\x35\xb0\xb9\x2b\x36\x47\x9e\x8c\x4b\xae\x8f\x7f\xb7\xd5\x1a\x74\x23\xd4\xb3\x89\x56\x66\x7e\x1f\x78\x34\xd2\xaf\xca\x41\x32\x99\xe3\x6a\xbf\x34\x25\xcc\xad\xc9\xc7\x4f\xe3\x41\xf8\xb4\x1f\x71\xf5\x28\xb8\xbb\x07\x9e\x0c\x76\x9e\xd3\xf3\x80\xaa\xd4\x58\x8f\x49\xa9\x6d\x50\x77\x53\xf1\x2a\x8c\x1d\x55\x82\x1a\xcb\x6c\x38\x6f\x19\x25\x37\xb5\x1e\x89\xad\xc7\xc9\x54\xc8\x79\x51\xb2\x85\xf1\x1b\xef\x18\x48\x6d\x0d\x42\x1b\xa6\xdd\x93\x5
7\xaf\x5f\xfd\xf4\xf3\xeb\x5f\x5e\xfd\xa3\xa3\x8e\x48\x4f\x24\x7a\xde\x9f\x70\xe3\xf1\x70\xb0\x15\xd3\x5c\xb9\x38\x77\xd6\x37\xa7\x6f\x78\xec\xd1\xbf\x47\x83\x5e\x30\x26\xf1\x59\x75\x67\x8b\x32\x71\x32\x99\x29\x93\xa2\x77\x5e\x19\x6e\x3d\x76\xb2\xed\x1f\x7c\xee\xfb\x4e\xa4\x03\x8c\x20\xdd\x77\xbd\x9d\x83\xed\x72\x79\x26\xe4\xc2\x95\x6b\x9c\x2a\x91\x96\xfb\x67\x41\x48\xeb\xb5\xae\x84\x0a\x69\x8d\x15\x54\x52\x6f\xc8\xfb\x8e\x8f\x80\x32\x5b\xe8\x04\x42\x87\x02\x67\xb6\x48\x33\x98\xa2\x14\x61\x3d\x4f\x4b\x2e\x86\x75\xb5\x5c\x7e\xd9\x42\x8a\x1c\xdc\xe4\x61\x2d\x7e\x11\xea\xf3\xe2\x89\xe3\x79\x26\x34\x7d\x7f\xe2\x1e\x58\x15\x1e\x5e\x14\xf6\xba\xf2\xd0\x5c\x3d\xd0\x90\x8f\x9e\xa9\xf7\x3c\xf0\x87\x56\x95\xc3\x8b\xca\xdd\x7e\x79\xa8\x5b\x1e\xdb\x2b\xe1\xa5\xde\xef\x93\xab\xad\xac\x41\xd2\x5d\x6e\x36\x18\x79\x73\x93\x59\xe2\xdf\xca\xaf\xe6\x73\xdc\x86\xc4\x22\x99\x17\x0c\x99\x58\x20\x08\xb8\xba\x18\x3e\x83\x0a\xff\xa1\x6d\xf4\x7f\xb5\x4f\x02\xe4\x05\x87\xe6\x7e\x53\xf0\x95\xf1\x28\x92\xf0\xcf\xdd\x37\xc4\x09\xa2\xa5\xf5\xc9\xf7\xde\x9a\xed\x83\x62\xb5\x92\xab\xe8\xaf\x00\x00\x00\xff\xff\x4d\x4e\x49\x4e\x53\x10\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x57\x5f\x6f\xdb\x38\x12\x7f\xd7\xa7\x18\xd8\x0f\x6d\x81\x44\x6e\x72\x87\x2b\xea\x7b\x38\x38\x4e\xff\x08\x75\x1d\x34\x4e\xae\xe8\x93\x41\x53\x63\x89\x6b\x8a\x64\xc8\x91\x5d\x21\x9b\xef\xbe\xa0\xfe\xd8\x96\xe3\xb8\xce\x76\x17\x8b\x5d\x3f\x04\xca\x70\x38\x9c\xdf\xcc\x6f\x38\xc3\x2e\x0c\xb5\x29\xac\x48\x52\x82\xf3\xd7\xe7\xe7\xf0\x41\xeb\x44\x22\x8c\x46\xc3\xa0\x1b\x74\x61\x24\x38\x2a\x87\x31\xe4\x2a\x46\x0b\x94\x22\x0c\x0c\xe3\x29\x36\x2b\x27\xf0\x7f\xb4\x4e\x68\x05\xe7\xe1\x6b\x78\xe9\x15\x3a\xf5\x52\xe7\xd5\x7f\x83\x2e\x14\x3a\x87\x8c\x15\xa0\x34\x41\xee\x10\x28\x15\x0e\xe6\x42\x22\xe0\x77\x8e\x86\x40\x28\xe0\x3a\x33\x52\x30\xc5\x11\x56\x82\xd2\xf2\x98\xda\x48\x18\x74\xe1\x5b\x6d\x42\xcf\x88\x09\x05\x0c\xb8\x36\x05\xe8\xf9\xb6\x1e\x30\x2a\x1d\x2e\x7f\x29\x91\xe9\xf7\x7a\xab\xd5\x2a\x64\xa5\xb7\xa1\xb6\x49\x4f\x56\x9a\xae\x37\x8a\x86\xef\xc6\x93\x77\xa7\xe7\xe1\xeb\x72\xcf\xad\x92\xe8\x1c\x58\xbc\xcb\x85\xc5\x18\x66\x05\x30\x63\xa4\xe0\x6c\x26\x11\x24\x5b\x81\xb6\xc0\x12\x8b\x18\x03\x69\xef\xf0\xca\x0a\x12\x2a\x39\x01\xa7\xe7\xb4\x62\x16\x83\x2e\xc4\xc2\x91\x15\xb3\x9c\x5a\xd1\x6a\xdc\x13\xae\xa5\xa0\x15\x30\x05\x9d\xc1\x04\xa2\x49\x07\x2e\x06\x93\x68\x72\x12\x74\xe1\x6b\x74\xf3\xf1\xea\xf6\x06\xbe\x0e\xae\xaf\x07\xe3\x9b\xe8\xdd\x04\xae\xae\x61\x78\x35\xbe\x8c\x6e\xa2\xab\xf1\x04\xae\xde\xc3\x60\xfc\x0d\x3e\x45\xe3\xcb\x13\x40\x41\x29\x5a\xc0\xef\xc6\x7a\xff\xb5\x05\xe1\xe3\x88\xb1\x0f\xda\x04\xb1\xe5\xc0\x5c\x57\x0e\x39\x83\x5c\xcc\x05\x07\xc9\x54\x92\xb3\x04\x21\xd1\x4b\xb4\x4a\xa8\x04\x0c\xda\x4c\x38\x9f\x4d\x07\x4c\xc5\x41\x17\xa4\xc8\x04\x31\x2a\x25\x8f\x40\x85\x41\xc0\x8c\xa8\xf3\xdf\x07\xae\x2d\x86\x5c\xd9\x2c\xe4\x52\xe7\x71\x98\x94\x54\x0a\xb9\xce\x7a\xcb\x33\x26\x4d\xca\xce\x82\x85\x50\x71\x1f\x26\x68\x97\x82\xe3\x67\x66\x8c\x50\x49\x90\x21\xb1\x98\x11\xeb\x07\x00\x8a\x65\xd8\x07\x77\x27\xf7\x5b\xaa\x35\x9c\x61\x1c\xfb\xe0\x55\x4e\x5d\xe1\x08\xb3\xc0\xe3\xda\x18\x98\x7c\x19\x05\x00\xcb\xc6\xb5\xe5\xd9\x0c
\x89\x9d\x05\x00\xae\x3a\xfa\xa3\x76\x34\x2e\x35\x3b\xee\x4e\xb2\x38\x13\xaa\x3e\x85\x19\xe1\xfc\x49\x9d\x00\xc0\xa2\xd3\xb9\xe5\xe8\xbc\x61\x80\xd3\xda\x78\xa5\x38\x75\x77\x72\xea\xdd\x9e\x31\x87\x41\x45\xbc\x1a\xde\x97\xd1\x65\x5b\x2e\xe2\x1b\xcc\x8c\x64\xe4\x0f\x34\x56\xff\x82\x9c\x5c\xef\xfe\xbe\xfe\x7c\x78\xe8\x09\xe5\xc8\x17\x80\x97\x36\xdf\x0f\x0f\xbd\xc6\xbe\x17\xfb\xc3\x1f\x1e\x3a\x8f\x4c\x0e\x99\xba\xc0\x5b\x87\xf1\x8d\xfe\xcc\x88\xa7\xd7\xb5\xd7\x15\x3e\xb2\x79\xe3\x45\x13\xe7\x3a\xee\xfd\x5a\xdc\xc4\xcc\xff\xad\x45\x0d\xf0\xe8\x72\xa3\x44\xcc\x26\x48\xef\x05\xca\x78\xaf\xee\x35\xce\xd1\xa2\x5a\x47\xab\x8a\xd8\x02\x8b\x3e\x34\x80\xae\x71\xbe\x5e\x02\xa0\x79\x6d\xac\x59\xde\x5a\x8b\xd1\x71\x2b\x0c\x95\xd9\xfb\xf5\x74\x6b\x05\xe0\x26\x45\x18\x7a\x5e\xf8\x40\xaf\x37\x87\x5b\x3a\xc9\x72\xd1\x6f\x6d\x59\xe7\x25\x7a\x7c\xd4\x5e\x9a\x6c\xd9\xb2\x3a\x37\x87\x19\xd9\xfc\x0c\xb3\xa8\xa8\x15\x72\xae\x95\xbf\xb3\xd0\xb6\x82\x42\x85\xc1\x3e\xd4\xc9\xdf\x17\x91\xed\xa5\x03\xac\x9b\xee\x44\xee\x29\x98\xc7\x27\x1e\x40\xb2\x19\x4a\xd7\x07\x87\xe4\xaf\x38\x17\xe6\x0e\xed\xb4\x92\xfe\x0e\x76\x3c\x8f\xf9\x3f\x4d\xf1\xc6\xb5\xc1\x92\x09\xe9\xaf\xef\x48\x0d\x9c\x43\x8a\xd4\x12\x15\x69\x5b\xec\xd5\x3e\x40\xdd\x8c\x39\x42\x1b\x1d\x26\x70\xa5\xb4\xce\xc6\xb4\x15\xd0\xbf\x88\x8e\xa7\x1b\xf7\xd6\xa9\x14\x66\xca\xb5\x9a\x8b\x24\xb7\xe5\x9d\x1e\x1a\x2b\x96\x8c\x70\xaa\x90\x56\xda\x2e\xb6\x4e\x2a\xc1\xd7\xcb\xe3\x6a\xb5\x0d\xfe\x09\x50\x43\x9d\x99\x7c\xbd\xe5\xd9\xb8\x78\xb5\xfd\x87\xa5\xd6\xa2\x9b\x43\x39\x9f\x4a\xa1\x16\xfb\xc0\xa3\xe2\xb6\x28\x2f\x92\xe9\x02\x8b\xdd\xd4\x94\x30\x37\x2a\x9f\x3e\x4f\x86\xfe\x53\x7f\xc2\xe2\x28\xb8\xdb\x1b\x9e\x0d\x76\x91\xb9\x3f\x10\xe8\x3a\xcb\xfe\x86\xf0\x6d\x0e\xed\x94\xe5\xb1\xa0\x3a\xe7\xe1\x2c\xe7\x0b\xa4\x5d\xf0\x95\xb4\x0d\xb6\x75\xfb\xfa\xeb\xd6\x47\xad\x19\xb8\x62\x74\x24\x54\xc9\x9f\x7a\x33\xbc\xc4\x30\x09\x4f\x20\x71\xfd\x5e\x2f\x2b\x2a\xe1\xab\x63\x2e\x64\xd2\x96\x25\x78\xb1\xeb\xd8\x91\x35\x50\xed\x7e\x5e\x08\x73\x2b\x9b\xdb\x25\x51\xda
\x62\x5c\xca\x5b\x75\xbf\x0e\x64\xed\xc4\xd6\x52\x8c\x12\x4b\x2a\x19\xab\x09\x39\xb5\x57\x2d\x7a\x97\x70\x3a\x63\x7c\x91\x97\xa5\x46\xf8\x9d\xb6\x14\xb8\xd4\x0a\xa1\x0b\xb3\xde\xd9\x9b\xb7\x6f\xfe\xf5\xef\xb7\xff\x79\xf3\xa7\xf6\x09\xe7\xe4\x94\xa3\xa5\xdd\xf6\x30\x99\x8c\x86\x1b\xb1\x5b\x08\x13\x65\x46\xdb\x76\xeb\xaa\x28\xf4\x01\x15\x5a\x46\x18\x47\x97\xf5\x99\x1d\x97\xb2\xb3\xe9\x5c\xa8\x04\xad\xb1\x42\x51\xe7\xd8\xb6\xb0\xbb\xf1\x67\x87\x23\xe7\xa4\x87\xe1\xa5\xbb\xa6\x37\x4d\xa4\x5b\xbe\x3c\x1c\x52\x6e\xca\x19\x58\x94\x48\xcb\xe1\x3d\x77\xe8\xaa\x99\xb8\x84\x0a\x49\x83\x15\x44\xdc\xb0\x7d\xd7\xf0\x09\xb8\x54\xe7\x32\x06\x7f\xbd\x01\xa5\x3a\x4f\x52\x98\x21\x67\xfe\x6d\x93\x94\x2c\xf4\xb3\x7e\xf9\x72\x20\x0d\x09\x92\x37\x93\xf9\x37\xc5\x0b\x9f\x9f\x17\xcf\xec\x6d\x73\x26\xdd\x8f\xdb\xd5\x81\x39\xeb\xe9\x29\x6b\xa7\x28\x0f\x35\xa5\x03\xe5\x78\x74\x43\xda\x33\x1d\x1d\x9a\xf3\x0e\x4f\x79\x8f\xeb\xe5\xa9\x6a\x39\xb6\x56\xfc\x98\xb3\x5b\x27\xb7\x1b\x59\x8b\xa4\xdb\xdc\x6c\x31\xf2\xfe\x3e\xd5\x8e\xfe\x57\x7e\xb5\x67\x99\x2e\xc4\x1a\x9d\x7a\x41\x90\xb2\x25\x02\x83\xdb\xeb\xd1\x4f\x50\xe1\x6f\x34\xca\xff\xa3\x86\x71\x80\x2c\x27\x5f\xdc\x17\x39\xdd\x2a\x8b\x2c\xf6\xff\x3c\xee\x21\x86\x39\xb7\xd2\x36\xfe\x51\xaf\xd9\x34\x14\x2d\x05\x2f\x82\xdf\x02\x00\x00\xff\xff\xf6\x6d\x15\x4f\x90\x11\x00\x00"), }, "/storage.yaml": &vfsgen۰CompressedFileInfo{ name: "storage.yaml", diff --git a/pkg/test/resourcefixture/contexts/compute_context.go b/pkg/test/resourcefixture/contexts/compute_context.go index ac61db6050..7914400a80 100644 --- a/pkg/test/resourcefixture/contexts/compute_context.go +++ b/pkg/test/resourcefixture/contexts/compute_context.go @@ -37,12 +37,23 @@ func init() { resourceContextMap["cloudfunctioncomputeregionnetworkendpointgroup"] = ResourceContext{ ResourceKind: "ComputeRegionNetworkEndpointGroup", - SkipUpdate: true, + // The GCP resource for ComputeRegionNetworkEndpointGroup doesn't + // support update. 
+ SkipUpdate: true, } resourceContextMap["cloudruncomputeregionnetworkendpointgroup"] = ResourceContext{ ResourceKind: "ComputeRegionNetworkEndpointGroup", - SkipUpdate: true, + // The GCP resource for ComputeRegionNetworkEndpointGroup doesn't + // support update. + SkipUpdate: true, + } + + resourceContextMap["privateserviceconnectioncomputeregionnetworkendpointgroup"] = ResourceContext{ + ResourceKind: "ComputeRegionNetworkEndpointGroup", + // The GCP resource for ComputeRegionNetworkEndpointGroup doesn't + // support update. + SkipUpdate: true, } resourceContextMap["computenetworkendpointgroup"] = ResourceContext{ diff --git a/pkg/test/resourcefixture/contexts/sql_context.go b/pkg/test/resourcefixture/contexts/sql_context.go index bcbe243d1f..959d92bbc1 100644 --- a/pkg/test/resourcefixture/contexts/sql_context.go +++ b/pkg/test/resourcefixture/contexts/sql_context.go @@ -15,7 +15,13 @@ package contexts func init() { - resourceContextMap["sqlinstance"] = ResourceContext{ + resourceContextMap["mysqlinstance"] = ResourceContext{ + // SQL instances names are reserved for 1 week after use: https://cloud.google.com/sql/docs/mysql/delete-instance + SkipDriftDetection: true, + ResourceKind: "SQLInstance", + } + + resourceContextMap["sqlserverinstance"] = ResourceContext{ // SQL instances names are reserved for 1 week after use: https://cloud.google.com/sql/docs/mysql/delete-instance SkipDriftDetection: true, ResourceKind: "SQLInstance", diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeregionnetworkendpointgroup/privateserviceconnectioncomputeregionnetworkendpointgroup/create.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeregionnetworkendpointgroup/privateserviceconnectioncomputeregionnetworkendpointgroup/create.yaml new file mode 100644 index 0000000000..670949e7c7 --- /dev/null +++ 
b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeregionnetworkendpointgroup/privateserviceconnectioncomputeregionnetworkendpointgroup/create.yaml @@ -0,0 +1,28 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeRegionNetworkEndpointGroup +metadata: + name: computeregionnetworkendpointgroup-${uniqueId} + annotations: + cnrm.cloud.google.com/project-id: project-${uniqueId} +spec: + region: us-west3 + networkEndpointType: PRIVATE_SERVICE_CONNECT + pscTargetService: https://www.googleapis.com/compute/v1/projects/project-${uniqueId}/regions/us-west3/serviceAttachments/computeserviceattachment-${uniqueId} + networkRef: + name: default + subnetworkRef: + name: default diff --git a/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeregionnetworkendpointgroup/privateserviceconnectioncomputeregionnetworkendpointgroup/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeregionnetworkendpointgroup/privateserviceconnectioncomputeregionnetworkendpointgroup/dependencies.yaml new file mode 100644 index 0000000000..7f29e0cee3 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/compute/v1beta1/computeregionnetworkendpointgroup/privateserviceconnectioncomputeregionnetworkendpointgroup/dependencies.yaml @@ -0,0 +1,118 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 +kind: Project +metadata: + name: project-${uniqueId} +spec: + organizationRef: + external: ${TEST_ORG_ID} + name: project-${uniqueId} + billingAccountRef: + external: ${TEST_BILLING_ACCOUNT_ID} +--- +apiVersion: serviceusage.cnrm.cloud.google.com/v1beta1 +kind: Service +metadata: + annotations: + cnrm.cloud.google.com/disable-on-destroy: "false" + name: compute.googleapis.com +spec: + projectRef: + name: project-${uniqueId} +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeNetwork +metadata: + name: default + annotations: + cnrm.cloud.google.com/project-id: project-${uniqueId} + cnrm.cloud.google.com/deletion-policy: "abandon" + cnrm.cloud.google.com/management-conflict-prevention-policy: "none" +spec: + description: Default network for the project +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeSubnetwork +metadata: + annotations: + cnrm.cloud.google.com/project-id: project-${uniqueId} + name: computesubnetwork-${uniqueId} +spec: + region: us-west3 + ipCidrRange: 10.2.0.0/16 + networkRef: + name: default + purpose: PRIVATE_SERVICE_CONNECT +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeSubnetwork +metadata: + name: default + annotations: + cnrm.cloud.google.com/project-id: project-${uniqueId} + cnrm.cloud.google.com/deletion-policy: "abandon" + cnrm.cloud.google.com/management-conflict-prevention-policy: "none" +spec: + ipCidrRange: 10.180.0.0/20 + region: us-west3 + 
networkRef: + name: default +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeBackendService +metadata: + annotations: + cnrm.cloud.google.com/project-id: project-${uniqueId} + name: computebackendservice-${uniqueId} +spec: + location: us-west3 + networkRef: + name: default + loadBalancingScheme: INTERNAL +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeForwardingRule +metadata: + annotations: + cnrm.cloud.google.com/project-id: project-${uniqueId} + name: computeforwardingrule-${uniqueId} +spec: + location: us-west3 + networkRef: + name: default + subnetworkRef: + name: default + loadBalancingScheme: INTERNAL + backendServiceRef: + name: computebackendservice-${uniqueId} + networkTier: PREMIUM + allPorts: true +--- +apiVersion: compute.cnrm.cloud.google.com/v1beta1 +kind: ComputeServiceAttachment +metadata: + name: computeserviceattachment-${uniqueId} +spec: + projectRef: + name: project-${uniqueId} + location: us-west3 + description: A sample service attachment + targetServiceRef: + name: computeforwardingrule-${uniqueId} + connectionPreference: ACCEPT_AUTOMATIC + natSubnets: + - name: computesubnetwork-${uniqueId} + enableProxyProtocol: false diff --git a/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/create.yaml b/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/create.yaml index fae45108ce..7861e8eb58 100644 --- a/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/create.yaml +++ b/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/create.yaml @@ -40,4 +40,17 @@ spec: monitoringConfig: enableComponents: - "SYSTEM_COMPONENTS" - - "WORKLOADS" \ No newline at end of file + - "WORKLOADS" + clusterAutoscaling: + enabled: true + autoscalingProfile: BALANCED + resourceLimits: + - resourceType: cpu + maximum: 100 + minimum: 10 + - resourceType: memory + maximum: 1000 + minimum: 100 + autoProvisioningDefaults: + 
bootDiskKMSKeyRef: + name: kmscryptokey-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/dependencies.yaml index ed822e508c..fa7cd24cd5 100644 --- a/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/dependencies.yaml +++ b/pkg/test/resourcefixture/testdata/basic/container/v1beta1/containercluster/dependencies.yaml @@ -34,3 +34,25 @@ apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 kind: PubSubTopic metadata: name: pubsubtopic-${uniqueId} +--- +apiVersion: kms.cnrm.cloud.google.com/v1beta1 +kind: KMSKeyRing +metadata: + name: kmskeyring-${uniqueId} +spec: + location: us-central1 +--- +apiVersion: kms.cnrm.cloud.google.com/v1beta1 +kind: KMSCryptoKey +metadata: + labels: + key-one: value-one + name: kmscryptokey-${uniqueId} +spec: + keyRingRef: + name: kmskeyring-${uniqueId} + purpose: ASYMMETRIC_SIGN + versionTemplate: + algorithm: EC_SIGN_P384_SHA384 + protectionLevel: SOFTWARE + importOnly: false diff --git a/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/create.yaml b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/basicpubsubsubscription/create.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/create.yaml rename to pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/basicpubsubsubscription/create.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/basicpubsubsubscription/dependencies.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/dependencies.yaml rename to 
pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/basicpubsubsubscription/dependencies.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/update.yaml b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/basicpubsubsubscription/update.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/update.yaml rename to pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/basicpubsubsubscription/update.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/create.yaml b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/create.yaml new file mode 100644 index 0000000000..f56f5d5afe --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/create.yaml @@ -0,0 +1,28 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +metadata: + labels: + label-one: "value-one" + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + name: pubsubsubscription-${uniqueId} +spec: + bigqueryConfig: + tableRef: + name: bigquerytable-1-${uniqueId} + topicRef: + name: pubsubtopic-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/dependencies.yaml new file mode 100644 index 0000000000..19fe071483 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/dependencies.yaml @@ -0,0 +1,105 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubTopic +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + name: pubsubtopic-${uniqueId} +--- +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + annotations: + # Abandoning the IAMPolicyMember as the underlying IAM binding is shared by + # TestAcquire and TestCreateNoChangeUpdateDelete, and shouldn't be deleted + # when only one of the test finishes. 
+ cnrm.cloud.google.com/deletion-policy: "abandon" + name: iampolicymember-1-${uniqueId} +spec: + member: serviceAccount:service-${projectNumber}@gcp-sa-pubsub.iam.gserviceaccount.com + role: roles/bigquery.metadataViewer + resourceRef: + apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 + kind: Project + external: projects/${projectId} +--- +apiVersion: iam.cnrm.cloud.google.com/v1beta1 +kind: IAMPolicyMember +metadata: + annotations: + # Abandoning the IAMPolicyMember as the underlying IAM binding is shared by + # TestAcquire and TestCreateNoChangeUpdateDelete, and shouldn't be deleted + # when only one of the test finishes. + cnrm.cloud.google.com/deletion-policy: "abandon" + name: iampolicymember-2-${uniqueId} +spec: + member: serviceAccount:service-${projectNumber}@gcp-sa-pubsub.iam.gserviceaccount.com + role: roles/bigquery.dataEditor + resourceRef: + apiVersion: resourcemanager.cnrm.cloud.google.com/v1beta1 + kind: Project + external: projects/${projectId} +--- +apiVersion: bigquery.cnrm.cloud.google.com/v1beta1 +kind: BigQueryDataset +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + name: bigquerydataset-${uniqueId} +spec: + resourceID: bigquerydataset${uniqueId} +--- +apiVersion: bigquery.cnrm.cloud.google.com/v1beta1 +kind: BigQueryTable +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + name: bigquerytable-1-${uniqueId} +spec: + resourceID: bigquerytable1${uniqueId} + friendlyName: bigquerytable-1-${uniqueId} + datasetRef: + name: bigquerydataset-${uniqueId} + schema: > + [ + { + "name": "data", + "type": "STRING", + "mode": "NULLABLE", + "description": "The data" + } + ] +--- +apiVersion: bigquery.cnrm.cloud.google.com/v1beta1 +kind: BigQueryTable +metadata: + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + name: bigquerytable-2-${uniqueId} +spec: + resourceID: bigquerytable2${uniqueId} + friendlyName: bigquerytable-2-${uniqueId} + datasetRef: + name: 
bigquerydataset-${uniqueId} + schema: > + [ + { + "name": "data", + "type": "STRING", + "mode": "NULLABLE", + "description": "The data" + } + ] diff --git a/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/update.yaml b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/update.yaml new file mode 100644 index 0000000000..7af9fbcae5 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/pubsub/v1beta1/pubsubsubscription/bigquerypubsubsubscription/update.yaml @@ -0,0 +1,28 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: pubsub.cnrm.cloud.google.com/v1beta1 +kind: PubSubSubscription +metadata: + labels: + label-one: "value-two" + annotations: + cnrm.cloud.google.com/project-id: ${projectId} + name: pubsubsubscription-${uniqueId} +spec: + bigqueryConfig: + tableRef: + name: bigquerytable-2-${uniqueId} + topicRef: + name: pubsubtopic-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/create.yaml b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/create.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/create.yaml rename to pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/create.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/update.yaml b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/update.yaml similarity index 100% rename from pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/update.yaml rename to pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/mysqlinstance/update.yaml diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/create.yaml b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/create.yaml new file mode 100644 index 0000000000..f57384d99c --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/create.yaml @@ -0,0 +1,30 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +metadata: + labels: + label-one: "value-one" + name: sqlinstance-sample-${uniqueId} +spec: + region: us-central1 + databaseVersion: SQLSERVER_2017_EXPRESS + settings: + tier: db-custom-1-3840 + sqlServerAuditConfig: + bucketRef: + name: storagebucket-${uniqueId} + rootPassword: + value: "1234" diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/dependencies.yaml b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/dependencies.yaml new file mode 100644 index 0000000000..0aebf79bad --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/dependencies.yaml @@ -0,0 +1,18 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: storage.cnrm.cloud.google.com/v1beta1 +kind: StorageBucket +metadata: + name: storagebucket-${uniqueId} diff --git a/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/update.yaml b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/update.yaml new file mode 100644 index 0000000000..32058de910 --- /dev/null +++ b/pkg/test/resourcefixture/testdata/basic/sql/v1beta1/sqlinstance/sqlserverinstance/update.yaml @@ -0,0 +1,31 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: sql.cnrm.cloud.google.com/v1beta1 +kind: SQLInstance +metadata: + labels: + label-one: "value-one" + newkey: "newval" + name: sqlinstance-sample-${uniqueId} +spec: + region: us-central1 + databaseVersion: SQLSERVER_2017_EXPRESS + settings: + tier: db-custom-1-3840 + sqlServerAuditConfig: + bucketRef: + name: storagebucket-${uniqueId} + rootPassword: + value: "1234" diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendbucket.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendbucket.md index 41c8f6a647..45bde86664 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendbucket.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendbucket.md @@ -45,9 +45,27 @@ Can Be Referenced by IAMPolicy/IAMPolicyMember + Yes + + + + Supports IAM Conditions No + + Supports IAM Audit Configs + No + + + IAM External Reference Format + + +

{% verbatim %}projects/{{project}}/global/backendBuckets/{{name}}{% endverbatim %}

+ + + + @@ -78,6 +96,8 @@ bucketRef: name: string namespace: string cdnPolicy: + bypassCacheOnRequestHeaders: + - headerName: string cacheKeyPolicy: includeHttpHeaders: - string @@ -91,6 +111,7 @@ cdnPolicy: negativeCachingPolicy: - code: integer ttl: integer + requestCoalescing: boolean serveWhileStale: integer signedUrlCacheMaxAgeSec: integer customResponseHeaders: @@ -158,6 +179,36 @@ resourceID: string

{% verbatim %}Cloud CDN configuration for this Backend Bucket.{% endverbatim %}

+ + +

cdnPolicy.bypassCacheOnRequestHeaders

+

Optional

+ + +

list (object)

+

{% verbatim %}Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.{% endverbatim %}

+ + + + +

cdnPolicy.bypassCacheOnRequestHeaders[]

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

cdnPolicy.bypassCacheOnRequestHeaders[].headerName

+

Optional

+ + +

string

+

{% verbatim %}The header field name to match on when bypassing cache. Values are case-insensitive.{% endverbatim %}

+ +

cdnPolicy.cacheKeyPolicy

@@ -306,6 +357,16 @@ can be specified as values, and you cannot specify a status code more than once. (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.{% endverbatim %}

+ + +

cdnPolicy.requestCoalescing

+

Optional

+ + +

boolean

+

{% verbatim %}If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.{% endverbatim %}

+ +

cdnPolicy.serveWhileStale

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendservice.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendservice.md index e93d315564..7b70796a22 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendservice.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computebackendservice.md @@ -279,7 +279,10 @@ When the load balancing scheme is INTERNAL, this field is not used.{% endverbati For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) -and CONNECTION (for TCP/SSL). Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"].{% endverbatim %}

+and CONNECTION (for TCP/SSL). + +See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) +for an explanation of load balancing modes. Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"].{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstance.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstance.md index 0805e0eb25..214bf77b32 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstance.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstance.md @@ -214,6 +214,7 @@ resourcePolicies: namespace: string scheduling: automaticRestart: boolean + instanceTerminationAction: string minNodeCpus: integer nodeAffinities: - value: {} @@ -1525,6 +1526,16 @@ zone: string

{% verbatim %}Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user).{% endverbatim %}

+ + +

scheduling.instanceTerminationAction

+

Optional

+ + +

string

+

{% verbatim %}Specifies the action GCE should take when SPOT VM is preempted.{% endverbatim %}

+ +

scheduling.minNodeCpus

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancegroupmanager.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancegroupmanager.md index 20e66b065f..868c763399 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancegroupmanager.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancegroupmanager.md @@ -447,7 +447,7 @@ Allowed value: The Google Cloud resource name of a `Project` resource (format: `

string

-

{% verbatim %}The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account: {projectNumber}@cloudservices.gserviceaccount.com is used. +

{% verbatim %}The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used. Allowed value: The `email` field of an `IAMServiceAccount` resource.{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancetemplate.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancetemplate.md index 8063dfa98f..fd39d278f2 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancetemplate.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeinstancetemplate.md @@ -164,6 +164,7 @@ reservationAffinity: resourceID: string scheduling: automaticRestart: boolean + instanceTerminationAction: string minNodeCpus: integer nodeAffinities: - value: {} @@ -1172,6 +1173,16 @@ tags:

{% verbatim %}Immutable. Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). This defaults to true.{% endverbatim %}

+ + +

scheduling.instanceTerminationAction

+

Optional

+ + +

string

+

{% verbatim %}Immutable. Specifies the action GCE should take when SPOT VM is preempted.{% endverbatim %}

+ +

scheduling.minNodeCpus

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computenetworkendpointgroup.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computenetworkendpointgroup.md index 779586ec8c..8c1e9f870e 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computenetworkendpointgroup.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computenetworkendpointgroup.md @@ -140,7 +140,9 @@ endpoint groups (see https://cloud.google.com/load-balancing/docs/hybrid). Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or -CONNECTION balancing modes. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"].{% endverbatim %}

+CONNECTION balancing modes. + +Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"].{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeregionnetworkendpointgroup.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeregionnetworkendpointgroup.md index b2ec4685d0..36046c02c5 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeregionnetworkendpointgroup.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computeregionnetworkendpointgroup.md @@ -88,9 +88,17 @@ cloudRun: urlMask: string description: string networkEndpointType: string +networkRef: + external: string + name: string + namespace: string pscTargetService: string region: string resourceID: string +subnetworkRef: + external: string + name: string + namespace: string ``` @@ -277,6 +285,48 @@ you create the resource.{% endverbatim %}

{% verbatim %}Immutable. Type of network endpoints in this network endpoint group. Defaults to SERVERLESS Default value: "SERVERLESS" Possible values: ["SERVERLESS", "PRIVATE_SERVICE_CONNECT"].{% endverbatim %}

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

networkRef

+

Optional

+
+

object

+

{% verbatim %}Immutable. This field is only used for PSC. +The URL of the network to which all network endpoints in the NEG belong. Uses +"default" project network if unspecified.{% endverbatim %}

+
+

networkRef.external

+

Optional

+
+

string

+

{% verbatim %}Allowed value: The `selfLink` field of a `ComputeNetwork` resource.{% endverbatim %}

+
+

networkRef.name

+

Optional

+
+

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+
+

networkRef.namespace

+

Optional

+
+

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+

pscTargetService

@@ -308,6 +358,47 @@ a Google API or a PSC Producer Service Attachment.{% endverbatim %}

{% verbatim %}Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default.{% endverbatim %}

+

subnetworkRef

+

Optional

+
+

object

+

{% verbatim %}Immutable. This field is only used for PSC. +Optional URL of the subnetwork to which all network endpoints in the NEG belong.{% endverbatim %}

+
+

subnetworkRef.external

+

Optional

+
+

string

+

{% verbatim %}Allowed value: The `selfLink` field of a `ComputeSubnetwork` resource.{% endverbatim %}

+
+

subnetworkRef.name

+

Optional

+
+

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+
+

subnetworkRef.namespace

+

Optional

+
+

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+
diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computesnapshot.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computesnapshot.md index 9882772017..df907ac6ea 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computesnapshot.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computesnapshot.md @@ -45,9 +45,27 @@ Can Be Referenced by IAMPolicy/IAMPolicyMember + Yes + + + + Supports IAM Conditions No + + Supports IAM Audit Configs + No + + + IAM External Reference Format + + +

{% verbatim %}projects/{{project}}/global/snapshots/{{name}}{% endverbatim %}

+ + + + diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md index ca470c7084..d300153b69 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/compute/computetargethttpsproxy.md @@ -73,6 +73,10 @@ ### Spec #### Schema ```yaml +certificateMapRef: + external: string + name: string + namespace: string description: string location: string proxyBind: boolean @@ -99,6 +103,50 @@ urlMapRef: + + +

certificateMapRef

+

Optional

+ + +

object

+

{% verbatim %}Only the `external` field is supported to configure the reference. + +A reference to the CertificateMap resource uri that identifies a +certificate map associated with the given target proxy. This field +can only be set for global target proxies.{% endverbatim %}

+ + + + +

certificateMapRef.external

+

Optional

+ + +

string

+

{% verbatim %}Allowed value: string of the format `//certificatemanager.googleapis.com/projects/{{project}}/locations/{{location}}/certificateMaps/{{value}}`, where {{value}} is the `name` field of a `CertificateManagerCertificateMap` resource.{% endverbatim %}

+ + + + +

certificateMapRef.name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

certificateMapRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+ +

description

@@ -157,7 +205,7 @@ equivalent to DISABLE. Default value: "NONE" Possible values: ["NONE", "ENABLE",

sslCertificates

-

Required

+

Optional

list (object)

@@ -167,7 +215,7 @@ equivalent to DISABLE. Default value: "NONE" Possible values: ["NONE", "ENABLE",

sslCertificates[]

-

Required

+

Optional

object

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/container/containercluster.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/container/containercluster.md index f6bcda3eee..1fe4d4f8e4 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/container/containercluster.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/container/containercluster.md @@ -109,8 +109,15 @@ addonsConfig: disabled: boolean authenticatorGroupsConfig: securityGroup: string +binaryAuthorization: + enabled: boolean + evaluationMode: string clusterAutoscaling: autoProvisioningDefaults: + bootDiskKMSKeyRef: + external: string + name: string + namespace: string imageType: string minCpuPlatform: string oauthScopes: @@ -194,6 +201,8 @@ masterAuthorizedNetworksConfig: cidrBlocks: - cidrBlock: string displayName: string +meshCertificates: + enableCertificates: boolean minMasterVersion: string monitoringConfig: enableComponents: @@ -402,7 +411,7 @@ workloadIdentityConfig:

object

-

{% verbatim %}Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to disabled; set enabled = true to enable.{% endverbatim %}

+

{% verbatim %}Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to enabled; set disabled = true to disable.{% endverbatim %}

@@ -572,7 +581,7 @@ workloadIdentityConfig:

object

-

{% verbatim %}Immutable. Configuration for the Google Groups for GKE feature.{% endverbatim %}

+

{% verbatim %}Configuration for the Google Groups for GKE feature.{% endverbatim %}

@@ -582,7 +591,37 @@ workloadIdentityConfig:

string

-

{% verbatim %}Immutable. The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.{% endverbatim %}

+

{% verbatim %}The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.{% endverbatim %}

+ + + + +

binaryAuthorization

+

Optional

+ + +

object

+

{% verbatim %}Configuration options for the Binary Authorization feature.{% endverbatim %}

+ + + + +

binaryAuthorization.enabled

+

Optional

+ + +

boolean

+

{% verbatim %}DEPRECATED. Deprecated in favor of evaluation_mode. Enable Binary Authorization for this cluster.{% endverbatim %}

+ + + + +

binaryAuthorization.evaluationMode

+

Optional

+ + +

string

+

{% verbatim %}Mode of operation for Binary Authorization policy evaluation.{% endverbatim %}

@@ -605,6 +644,47 @@ workloadIdentityConfig:

{% verbatim %}Contains defaults for a node pool created by NAP.{% endverbatim %}

+ + +

clusterAutoscaling.autoProvisioningDefaults.bootDiskKMSKeyRef

+

Optional

+ + +

object

+

{% verbatim %}Immutable. The Customer Managed Encryption Key used to encrypt the +boot disk attached to each node in the node pool.{% endverbatim %}

+ + + + +

clusterAutoscaling.autoProvisioningDefaults.bootDiskKMSKeyRef.external

+

Optional

+ + +

string

+

{% verbatim %}Allowed value: The `selfLink` field of a `KMSCryptoKey` resource.{% endverbatim %}

+ + + + +

clusterAutoscaling.autoProvisioningDefaults.bootDiskKMSKeyRef.name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

clusterAutoscaling.autoProvisioningDefaults.bootDiskKMSKeyRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+ +

clusterAutoscaling.autoProvisioningDefaults.imageType

@@ -942,7 +1022,7 @@ workloadIdentityConfig:

boolean

-

{% verbatim %}Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binary Authorization.{% endverbatim %}

+

{% verbatim %}DEPRECATED. Deprecated in favor of binary_authorization. Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binary Authorization.{% endverbatim %}

@@ -1465,6 +1545,26 @@ workloadIdentityConfig:

{% verbatim %}Field for users to identify CIDR blocks.{% endverbatim %}

+ + +

meshCertificates

+

Optional

+ + +

object

+

{% verbatim %}If set, and enable_certificates=true, the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.{% endverbatim %}

+ + + + +

meshCertificates.enableCertificates

+

Required*

+ + +

boolean

+

{% verbatim %}When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.{% endverbatim %}

+ +

minMasterVersion

@@ -1492,7 +1592,7 @@ workloadIdentityConfig:

list (string)

-

{% verbatim %}GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS and WORKLOADS.{% endverbatim %}

+

{% verbatim %}GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.{% endverbatim %}

@@ -2324,7 +2424,7 @@ for running workloads on sole tenant nodes.{% endverbatim %}

boolean

-

{% verbatim %}Immutable. Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.{% endverbatim %}

+

{% verbatim %}Immutable. When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.{% endverbatim %}

@@ -2334,7 +2434,7 @@ for running workloads on sole tenant nodes.{% endverbatim %}

boolean

-

{% verbatim %}Immutable. When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.{% endverbatim %}

+

{% verbatim %}Immutable. Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/dataproc/dataprocworkflowtemplate.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/dataproc/dataprocworkflowtemplate.md index 981b6a915c..140bea7262 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/dataproc/dataprocworkflowtemplate.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/dataproc/dataprocworkflowtemplate.md @@ -603,7 +603,7 @@ resourceID: string

list (string)

-

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

+

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

@@ -733,7 +733,7 @@ resourceID: string

list (string)

-

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

+

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

@@ -883,7 +883,7 @@ resourceID: string

list (string)

-

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

+

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

@@ -1413,7 +1413,7 @@ resourceID: string

list (string)

-

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

+

{% verbatim %}Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/gkehub/gkehubmembership.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/gkehub/gkehubmembership.md index 62ae758429..6510631ecb 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/gkehub/gkehubmembership.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/gkehub/gkehubmembership.md @@ -352,7 +352,7 @@ updateTime: string authority.workloadIdentityPool

string

-

{% verbatim %}Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in: {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API.{% endverbatim %}

+

{% verbatim %}Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API.{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampartialpolicy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampartialpolicy.md index 5012890237..47638b987e 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampartialpolicy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampartialpolicy.md @@ -139,6 +139,10 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since CloudFunctionsFunction Y + + ComputeBackendBucket + + ComputeDisk @@ -151,6 +155,10 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since ComputeInstance Y + + ComputeSnapshot + + ComputeSubnetwork Y @@ -322,6 +330,14 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since + + ComputeBackendBucket + + +

{% verbatim %}projects/{{project}}/global/backendBuckets/{{name}}{% endverbatim %}

+ + + ComputeDisk @@ -348,6 +364,14 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since + + ComputeSnapshot + + +

{% verbatim %}projects/{{project}}/global/snapshots/{{name}}{% endverbatim %}

+ + + ComputeSubnetwork diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicy.md index e18d99bec0..aeb72c3c64 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicy.md @@ -150,6 +150,11 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since Y + + ComputeBackendBucket + + + ComputeDisk @@ -165,6 +170,11 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since Y + + ComputeSnapshot + + + ComputeSubnetwork Y @@ -358,6 +368,14 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since + + ComputeBackendBucket + + +

{% verbatim %}projects/{{project}}/global/backendBuckets/{{name}}{% endverbatim %}

+ + + ComputeDisk @@ -384,6 +402,14 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since + + ComputeSnapshot + + +

{% verbatim %}projects/{{project}}/global/snapshots/{{name}}{% endverbatim %}

+ + + ComputeSubnetwork diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicymember.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicymember.md index 784e1a6354..9106203684 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicymember.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/iam/iampolicymember.md @@ -130,6 +130,10 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since CloudFunctionsFunction + + ComputeBackendBucket + + ComputeDisk @@ -142,6 +146,10 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since ComputeInstance Y + + ComputeSnapshot + + ComputeSubnetwork Y @@ -313,6 +321,14 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since + + ComputeBackendBucket + + +

{% verbatim %}projects/{{project}}/global/backendBuckets/{{name}}{% endverbatim %}

+ + + ComputeDisk @@ -339,6 +355,14 @@ resources using `IAMPolicy`, `IAMPartialPolicy`, and `IAMPolicyMember` since + + ComputeSnapshot + + +

{% verbatim %}projects/{{project}}/global/snapshots/{{name}}{% endverbatim %}

+ + + ComputeSubnetwork diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformoauthidpconfig.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformoauthidpconfig.md index f813e61ce2..bce5667824 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformoauthidpconfig.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformoauthidpconfig.md @@ -219,7 +219,7 @@ responseType:

object

-

{% verbatim %}The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g.: {id\_token, token}).{% endverbatim %}

+

{% verbatim %}The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g. {id\_token, token}).{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformtenantoauthidpconfig.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformtenantoauthidpconfig.md index bfe0fe206a..4dabc8a893 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformtenantoauthidpconfig.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/identityplatform/identityplatformtenantoauthidpconfig.md @@ -223,7 +223,7 @@ tenantRef:

object

-

{% verbatim %}The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g.: {id\_token, token}).{% endverbatim %}

+

{% verbatim %}The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g. {id\_token, token}).{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/kms/kmscryptokey.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/kms/kmscryptokey.md index a020a2d258..30e7fc907a 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/kms/kmscryptokey.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/kms/kmscryptokey.md @@ -169,7 +169,7 @@ If not specified at creation time, the default duration is 24 hours.{% endverbat

string

{% verbatim %}Immutable. The immutable purpose of this CryptoKey. See the [purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) -for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"].{% endverbatim %}

+for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC"].{% endverbatim %}

@@ -234,7 +234,7 @@ See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v

string

-

{% verbatim %}Immutable. The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL". Defaults to "SOFTWARE".{% endverbatim %}

+

{% verbatim %}Immutable. The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE".{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/logging/logginglogmetric.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/logging/logginglogmetric.md index 7a0896d5e2..46a810b0d9 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/logging/logginglogmetric.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/logging/logginglogmetric.md @@ -396,7 +396,7 @@ valueExtractor: string

string

-

{% verbatim %}The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent").{% endverbatim %}

+

{% verbatim %}The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users"). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent").{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringalertpolicy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringalertpolicy.md index 7cae1ab704..1b9ae4027d 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringalertpolicy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringalertpolicy.md @@ -97,6 +97,7 @@ conditions: string: string conditionMonitoringQueryLanguage: duration: string + evaluationMissingData: string query: string trigger: count: integer @@ -117,6 +118,7 @@ conditions: perSeriesAligner: string denominatorFilter: string duration: string + evaluationMissingData: string filter: string thresholdValue: float trigger: @@ -521,6 +523,18 @@ that unhealthy states are detected and alerted on quickly.{% endverbatim %}

+ + +

conditions[].conditionMonitoringQueryLanguage.evaluationMissingData

+

Optional

+ + +

string

+

{% verbatim %}A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"].{% endverbatim %}

+ +

conditions[].conditionMonitoringQueryLanguage.query

@@ -953,6 +967,18 @@ that unhealthy states are detected and alerted on quickly.{% endverbatim %}

+ + +

conditions[].conditionThreshold.evaluationMissingData

+

Optional

+ + +

string

+

{% verbatim %}A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"].{% endverbatim %}

+ +

conditions[].conditionThreshold.filter

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md index 1e0eacd96a..d8c2d0d666 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringdashboard.md @@ -862,7 +862,7 @@ rowLayout:

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', }, { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

@@ -2332,7 +2332,7 @@ rowLayout:

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', }, { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

@@ -3812,7 +3812,7 @@ rowLayout:

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', }, { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

@@ -5374,7 +5374,7 @@ Allowed value: The Google Cloud resource name of a `Project` resource (format: `

list (object)

-

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

+

{% verbatim %}The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', }, { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringmetricdescriptor.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringmetricdescriptor.md index 311c4ac104..5d742c9fa1 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringmetricdescriptor.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/monitoring/monitoringmetricdescriptor.md @@ -276,7 +276,7 @@ Allowed value: The Google Cloud resource name of a `Project` resource (format: `

string

-

{% verbatim %}Immutable. The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent").{% endverbatim %}

+

{% verbatim %}Immutable. The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, "new users per day" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users"). Alternatively, "thousands of page views per day" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean "5300 page views per day"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means "3 percent").{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/osconfig/osconfigguestpolicy.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/osconfig/osconfigguestpolicy.md index f701815050..8078e33f6a 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/osconfig/osconfigguestpolicy.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/osconfig/osconfigguestpolicy.md @@ -847,7 +847,7 @@ Allowed value: The Google Cloud resource name of a `StorageBucket` resource (for

string

-

{% verbatim %}URI from which to fetch the object. It should contain both the protocol and path following the format: {protocol}://{location}.{% endverbatim %}

+

{% verbatim %}URI from which to fetch the object. It should contain both the protocol and path following the format {protocol}://{location}.{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/pubsub/pubsubsubscription.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/pubsub/pubsubsubscription.md index d945d9ed26..2a2b895fd2 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/pubsub/pubsubsubscription.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/pubsub/pubsubsubscription.md @@ -92,6 +92,14 @@ #### Schema ```yaml ackDeadlineSeconds: integer +bigqueryConfig: + dropUnknownFields: boolean + tableRef: + external: string + name: string + namespace: string + useTopicSchema: boolean + writeMetadata: boolean deadLetterPolicy: deadLetterTopicRef: external: string @@ -156,6 +164,90 @@ If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message.{% endverbatim %}

+ + +

bigqueryConfig

+

Optional

+ + +

object

+

{% verbatim %}If delivery to BigQuery is used with this subscription, this field is used to configure it. +Either pushConfig or bigQueryConfig can be set, but not both. +If both are empty, then the subscriber will pull and ack messages using API methods.{% endverbatim %}

+ + + + +

bigqueryConfig.dropUnknownFields

+

Optional

+ + +

boolean

+

{% verbatim %}When true and useTopicSchema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. +Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog.{% endverbatim %}

+ + + + +

bigqueryConfig.tableRef

+

Required*

+ + +

object

+

{% verbatim %}The name of the table to which to write data.{% endverbatim %}

+ + + + +

bigqueryConfig.tableRef.external

+

Optional

+ + +

string

+

{% verbatim %}Allowed value: string of the format `{{project}}.{{dataset_id}}.{{value}}`, where {{value}} is the `name` field of a `BigQueryTable` resource.{% endverbatim %}

+ + + + +

bigqueryConfig.tableRef.name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

bigqueryConfig.tableRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+ + + + +

bigqueryConfig.useTopicSchema

+

Optional

+ + +

boolean

+

{% verbatim %}When true, use the topic's schema as the columns to write to in BigQuery, if it exists.{% endverbatim %}

+ + + + +

bigqueryConfig.writeMetadata

+

Optional

+ + +

boolean

+

{% verbatim %}When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. +The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column.{% endverbatim %}

+ +

deadLetterPolicy

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/redis/redisinstance.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/redis/redisinstance.md index 38e14b0b8a..18fcb6f598 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/redis/redisinstance.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/redis/redisinstance.md @@ -81,6 +81,10 @@ authorizedNetworkRef: name: string namespace: string connectMode: string +customerManagedKeyRef: + external: string + name: string + namespace: string displayName: string locationId: string maintenancePolicy: @@ -206,6 +210,48 @@ unspecified, the default network will be used.{% endverbatim %}

{% verbatim %}Immutable. The connection mode of the Redis instance. Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"].{% endverbatim %}

+ + +

customerManagedKeyRef

+

Optional

+ + +

object

+

{% verbatim %}Immutable. Optional. The KMS key reference that you want to use to +encrypt the data at rest for this Redis instance. If this is +provided, CMEK is enabled.{% endverbatim %}

+ + + + +

customerManagedKeyRef.external

+

Optional

+ + +

string

+

{% verbatim %}Allowed value: The `selfLink` field of a `KMSCryptoKey` resource.{% endverbatim %}

+ + + + +

customerManagedKeyRef.name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

customerManagedKeyRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+ +

displayName

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/run/runservice.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/run/runservice.md index 9bdf7a73ab..6b2e8ed9c3 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/run/runservice.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/run/runservice.md @@ -509,7 +509,7 @@ Allowed value: The Google Cloud resource name of a `Project` resource (format: `

string

-

{% verbatim %}Required. The name of the secret in Cloud Secret Manager. Format: {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project. +

{% verbatim %}Required. The name of the secret in Cloud Secret Manager. Format {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project. Allowed value: The Google Cloud resource name of a `SecretManagerSecret` resource (format: `projects/{{project}}/secrets/{{name}}`).{% endverbatim %}

@@ -1027,7 +1027,7 @@ Allowed value: The Google Cloud resource name of a `SecretManagerSecretVersion`

string

-

{% verbatim %}Required. The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. +

{% verbatim %}Required. The name of the secret in Cloud Secret Manager. Format {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. Allowed value: The Google Cloud resource name of a `SecretManagerSecret` resource (format: `projects/{{project}}/secrets/{{name}}`).{% endverbatim %}

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/spanner/spannerdatabase.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/spanner/spannerdatabase.md index 3227efcd4d..d248600ab9 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/spanner/spannerdatabase.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/spanner/spannerdatabase.md @@ -104,6 +104,7 @@ instanceRef: name: string namespace: string resourceID: string +versionRetentionPeriod: string ``` @@ -120,7 +121,8 @@ resourceID: string @@ -247,6 +249,20 @@ must exist in the same location as the Spanner Database.{% endverbatim %}

{% verbatim %}Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default.{% endverbatim %}

+ + + +

string

-

{% verbatim %}{% endverbatim %}

+

{% verbatim %}Immutable. The dialect of the Cloud Spanner Database. +If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"].{% endverbatim %}

+

versionRetentionPeriod

+

Optional

+
+

string

+

{% verbatim %}The retention period for the database. The retention period must be between 1 hour +and 7 days, and can be specified in days, hours, minutes, or seconds. For example, +the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. +If this property is used, you must avoid adding new DDL statements to 'ddl' that +update the database's version_retention_period.{% endverbatim %}

+
diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/sql/sqlinstance.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/sql/sqlinstance.md index 1ff92e2246..b0ea89a37c 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/sql/sqlinstance.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/sql/sqlinstance.md @@ -158,13 +158,28 @@ settings: requireSsl: boolean locationPreference: followGaeApplication: string + secondaryZone: string zone: string maintenanceWindow: day: integer hour: integer updateTrack: string + passwordValidationPolicy: + complexity: string + disallowUsernameSubstring: boolean + enablePasswordPolicy: boolean + minLength: integer + passwordChangeInterval: string + reuseInterval: integer pricingPlan: string replicationType: string + sqlServerAuditConfig: + bucketRef: + external: string + name: string + namespace: string + retentionInterval: string + uploadInterval: string tier: string ``` @@ -462,7 +477,7 @@ settings:

object

-

{% verbatim %}Immutable. Initial root password. Required for MS SQL Server, ignored by MySQL and PostgreSQL.{% endverbatim %}

+

{% verbatim %}Immutable. Initial root password. Required for MS SQL Server.{% endverbatim %}

@@ -698,7 +713,7 @@ is set to true.{% endverbatim %}

string

-

{% verbatim %}The name of server instance collation.{% endverbatim %}

+

{% verbatim %}Immutable. The name of server instance collation.{% endverbatim %}

@@ -992,6 +1007,16 @@ Specifying this field has no-ops; it's recommended to remove this field from you

{% verbatim %}A Google App Engine application whose zone to remain in. Must be in the same region as this instance.{% endverbatim %}

+ + +

settings.locationPreference.secondaryZone

+

Optional

+ + +

string

+

{% verbatim %}The preferred Compute Engine zone for the secondary/failover.{% endverbatim %}

+ +

settings.locationPreference.zone

@@ -1042,6 +1067,76 @@ Specifying this field has no-ops; it's recommended to remove this field from you

{% verbatim %}Receive updates earlier (canary) or later (stable).{% endverbatim %}

+ + +

settings.passwordValidationPolicy

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

settings.passwordValidationPolicy.complexity

+

Optional

+ + +

string

+

{% verbatim %}Password complexity.{% endverbatim %}

+ + + + +

settings.passwordValidationPolicy.disallowUsernameSubstring

+

Optional

+ + +

boolean

+

{% verbatim %}Disallow username as a part of the password.{% endverbatim %}

+ + + + +

settings.passwordValidationPolicy.enablePasswordPolicy

+

Required*

+ + +

boolean

+

{% verbatim %}Whether the password policy is enabled or not.{% endverbatim %}

+ + + + +

settings.passwordValidationPolicy.minLength

+

Optional

+ + +

integer

+

{% verbatim %}Minimum number of characters allowed.{% endverbatim %}

+ + + + +

settings.passwordValidationPolicy.passwordChangeInterval

+

Optional

+ + +

string

+

{% verbatim %}Minimum interval after which the password can be changed. This flag is only supported for PostgreSQL.{% endverbatim %}

+ + + + +

settings.passwordValidationPolicy.reuseInterval

+

Optional

+ + +

integer

+

{% verbatim %}Number of previous passwords that cannot be reused.{% endverbatim %}

+ +

settings.pricingPlan

@@ -1063,6 +1158,76 @@ Specifying this field has no-ops; it's recommended to remove this field from you Specifying this field has no-ops; it's recommended to remove this field from your configuration.{% endverbatim %}

+ + +

settings.sqlServerAuditConfig

+

Optional

+ + +

object

+

{% verbatim %}{% endverbatim %}

+ + + + +

settings.sqlServerAuditConfig.bucketRef

+

Required*

+ + +

object

+

{% verbatim %}The name of the destination bucket (e.g., gs://mybucket).{% endverbatim %}

+ + + + +

settings.sqlServerAuditConfig.bucketRef.external

+

Optional

+ + +

string

+

{% verbatim %}Allowed value: The `url` field of a `StorageBucket` resource.{% endverbatim %}

+ + + + +

settings.sqlServerAuditConfig.bucketRef.name

+

Optional

+ + +

string

+

{% verbatim %}Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names{% endverbatim %}

+ + + + +

settings.sqlServerAuditConfig.bucketRef.namespace

+

Optional

+ + +

string

+

{% verbatim %}Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/{% endverbatim %}

+ + + + +

settings.sqlServerAuditConfig.retentionInterval

+

Optional

+ + +

string

+

{% verbatim %}How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".{% endverbatim %}

+ + + + +

settings.sqlServerAuditConfig.uploadInterval

+

Optional

+ + +

string

+

{% verbatim %}How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".{% endverbatim %}

+ +

settings.tier

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md index 7c49225b7a..9a635202ca 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/storage/storagebucket.md @@ -125,8 +125,12 @@ lifecycleRule: customTimeBefore: string daysSinceCustomTime: integer daysSinceNoncurrentTime: integer + matchesPrefix: + - string matchesStorageClass: - string + matchesSuffix: + - string noncurrentTimeBefore: string numNewerVersions: integer withState: string @@ -428,6 +432,26 @@ Enables Bucket PolicyOnly access to a bucket.{% endverbatim %}

condition is relevant only for versioned objects.{% endverbatim %}

+ + +

lifecycleRule[].condition.matchesPrefix

+

Optional

+ + +

list (string)

+

{% verbatim %}One or more matching name prefixes to satisfy this condition.{% endverbatim %}

+ + + + +

lifecycleRule[].condition.matchesPrefix[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ +

lifecycleRule[].condition.matchesStorageClass

@@ -448,6 +472,26 @@ Enables Bucket PolicyOnly access to a bucket.{% endverbatim %}

{% verbatim %}{% endverbatim %}

+ + +

lifecycleRule[].condition.matchesSuffix

+

Optional

+ + +

list (string)

+

{% verbatim %}One or more matching name suffixes to satisfy this condition.{% endverbatim %}

+ + + + +

lifecycleRule[].condition.matchesSuffix[]

+

Optional

+ + +

string

+

{% verbatim %}{% endverbatim %}

+ +

lifecycleRule[].condition.noncurrentTimeBefore

diff --git a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/vpcaccess/vpcaccessconnector.md b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/vpcaccess/vpcaccessconnector.md index 785d39f746..92048d2040 100644 --- a/scripts/generate-google3-docs/resource-reference/generated/resource-docs/vpcaccess/vpcaccessconnector.md +++ b/scripts/generate-google3-docs/resource-reference/generated/resource-docs/vpcaccess/vpcaccessconnector.md @@ -284,7 +284,7 @@ Allowed value: The Google Cloud resource name of a `Project` resource (format: `

string

-

{% verbatim %}Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be: {subnetName} +

{% verbatim %}Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be {subnetName} Allowed value: The Google Cloud resource name of a `ComputeSubnetwork` resource (format: `projects/{{project}}/regions/{{region}}/subnetworks/{{name}}`).{% endverbatim %}

diff --git a/third_party/Makefile b/third_party/Makefile index 06ee743cc0..9fe9fa0c5d 100644 --- a/third_party/Makefile +++ b/third_party/Makefile @@ -24,7 +24,7 @@ clone-terraform-google-provider-beta: rm -rf github.com/hashicorp/terraform-provider-google-beta mkdir -p github.com/hashicorp cd github.com/hashicorp && git clone https://github.com/hashicorp/terraform-provider-google-beta - git -C github.com/hashicorp/terraform-provider-google-beta checkout v4.27.0 + git -C github.com/hashicorp/terraform-provider-google-beta checkout v4.33.0 rm -rf github.com/hashicorp/terraform-provider-google-beta/.git .PHONY: apply-patches diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/reviewer-lottery.yml b/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/reviewer-lottery.yml index d71b7e6ada..074898296a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/reviewer-lottery.yml +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/reviewer-lottery.yml @@ -2,6 +2,7 @@ groups: - name: devs reviewers: 1 usernames: + - rileykarson - slevenick - c2thorn - scottsuarez diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/go.yml b/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/go.yml index c22f9d39f2..665e43f71b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/go.yml +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/go.yml @@ -23,14 +23,15 @@ jobs: - name: Get dependencies run: | - go get - make tools + go mod download - name: Build - run: go build + run: | + make lint + make generate + go build - name: Test run: | make docscheck - make lint make test diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/upstream-mm.yml 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/upstream-mm.yml new file mode 100644 index 0000000000..eae4bdd80c --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/.github/workflows/upstream-mm.yml @@ -0,0 +1,15 @@ +name: "Upstream To MM" +on: + pull_request_target: + types: [opened, ready_for_review, reopened] + +jobs: + pr-warning: + if: ${{ github.actor != 'modular-magician' }} + runs-on: ubuntu-latest + steps: + - name: Post the warning + uses: hashicorp/pr-warning@v1.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + warning: "This repository is generated by https://github.com/GoogleCloudPlatform/magic-modules. Any changes made directly to this repository will likely be overwritten. If you have further questions, please feel free to ping your reviewer or, internal employees, reach out to one of the engineers. Thank you!" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/.go-version b/third_party/github.com/hashicorp/terraform-provider-google-beta/.go-version index 199f3c2bf5..ec6d649be6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/.go-version +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/.go-version @@ -1 +1 @@ -1.16.14 +1.18.1 diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/.golangci.yml b/third_party/github.com/hashicorp/terraform-provider-google-beta/.golangci.yml index 25d5d46bac..7b08f8a978 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/.golangci.yml +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/.golangci.yml @@ -1,5 +1,5 @@ run: - deadline: 2m30s + timeout: 5m linters: disable-all: true diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/.goreleaser.yml b/third_party/github.com/hashicorp/terraform-provider-google-beta/.goreleaser.yml index caac45fd42..5b3a87eded 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/.goreleaser.yml +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/.goreleaser.yml @@ -29,7 +29,7 @@ builds: - goarch: arm64 goos: windows ldflags: - - -s -w -X internal/provider.Version={{.Version}} + - -s -w -X version.ProviderVersion={{.Version}} mod_timestamp: '{{ .CommitTimestamp }}' checksum: extra_files: diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/CHANGELOG.md b/third_party/github.com/hashicorp/terraform-provider-google-beta/CHANGELOG.md index c4679b8c32..3483fa9dcb 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/CHANGELOG.md +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/CHANGELOG.md @@ -1,4 +1,159 @@ -## 4.27.0 (Unreleased) +## 4.33.0 (Unreleased) + +IMPROVEMENTS: +* container: added update support for `authenticator_groups_config` in `google_container_cluster` ([#4591](https://github.com/hashicorp/terraform-provider-google-beta/pull/4591)) +* dataflow: added ability to import `google_dataflow_job` ([#4595](https://github.com/hashicorp/terraform-provider-google-beta/pull/4595)) +* dns: added `managed_zone_id` attribute to `google_dns_managed_zone` data source ([#4593](https://github.com/hashicorp/terraform-provider-google-beta/pull/4593)) +* metastore: added `metadata_integration` and `hive_metastore_config.auxiliary_versions` fields to `google_dataproc_metastore_service` resource ([#4598](https://github.com/hashicorp/terraform-provider-google-beta/pull/4598)) +* monitoring: added `accepted_response_status_codes` to `google_monitoring_uptime_check_config` ([#4594](https://github.com/hashicorp/terraform-provider-google-beta/pull/4594)) +* sql: added `password_validation_policy` field to `google_cloud_sql` resource ([#4597](https://github.com/hashicorp/terraform-provider-google-beta/pull/4597)) + +BUG FIXES: +* bigquery: removed force replacement for `display_name` on 
`google_bigquery_data_transfer_config` ([#4592](https://github.com/hashicorp/terraform-provider-google-beta/pull/4592)) +* compute: fixed permadiff for `instance_termination_action` in `google_compute_instance_template` ([#4590](https://github.com/hashicorp/terraform-provider-google-beta/pull/4590)) + +## 4.32.0 (August 15, 2022) + +NOTES: +* Updated to Golang 1.18 ([#4564](https://github.com/hashicorp/terraform-provider-google-beta/pull/4564)) + +FEATURES: +* **New Resource:** `google_dataplex_asset` ([#4543](https://github.com/hashicorp/terraform-provider-google-beta/pull/4543)) +* **New Resource:** `google_gke_hub_membership_iam_binding` ([#4583](https://github.com/hashicorp/terraform-provider-google-beta/pull/4583)) +* **New Resource:** `google_gke_hub_membership_iam_member` ([#4583](https://github.com/hashicorp/terraform-provider-google-beta/pull/4583)) +* **New Resource:** `google_gke_hub_membership_iam_policy` ([#4583](https://github.com/hashicorp/terraform-provider-google-beta/pull/4583)) + +IMPROVEMENTS: +* certificatemanager: added `state`, `authorization_attempt_info` and `provisioning_issue` output fields to `google_certificate_manager_certificate` ([#4548](https://github.com/hashicorp/terraform-provider-google-beta/pull/4548)) +* cloudfunctions2: added field `event_filters` to resource `google_cloudfunctions2_function` ([#4547](https://github.com/hashicorp/terraform-provider-google-beta/pull/4547)) +* compute: added `certificate_map` to `compute_target_https_proxy` resource ([#4550](https://github.com/hashicorp/terraform-provider-google-beta/pull/4550)) +* compute: added validation for name field on `google_compute_network` ([#4579](https://github.com/hashicorp/terraform-provider-google-beta/pull/4579)) +* compute: made `port` optional in `google_compute_network_endpoint` to allow network endpoints to be associated with `GCE_VM_IP` network endpoint groups ([#4575](https://github.com/hashicorp/terraform-provider-google-beta/pull/4575)) +* container: 
added support for additional values `APISERVER`, `CONTROLLER_MANAGER`, and `SCHEDULER` in `google_container_cluster.monitoring_config` ([#4565](https://github.com/hashicorp/terraform-provider-google-beta/pull/4565)) +* gkehub: added `monitoring` and `mutation_enabled` fields to resource `feature_membership` ([#4572](https://github.com/hashicorp/terraform-provider-google-beta/pull/4572)) +* gkehub: added better support for import for `google_gke_hub_membership` ([#4542](https://github.com/hashicorp/terraform-provider-google-beta/pull/4542)) +* pubsub: added `bigquery_config` to `google_pubsub_subscription` ([#4545](https://github.com/hashicorp/terraform-provider-google-beta/pull/4545)) +* scheduler: added `paused` field to `google_cloud_scheduler_job` ([#4535](https://github.com/hashicorp/terraform-provider-google-beta/pull/4535)) +* scheduler: added `state` output field to `google_cloud_scheduler_job` ([#4535](https://github.com/hashicorp/terraform-provider-google-beta/pull/4535)) + +BUG FIXES: +* apigee: fixed an issue where `google_apigee_instance` creation would fail due to multiple concurrent instances ([#4584](https://github.com/hashicorp/terraform-provider-google-beta/pull/4584)) +* billingbudget: fixed a bug where `google_billing_budget.budget_filter.services` was not updating. ([#4577](https://github.com/hashicorp/terraform-provider-google-beta/pull/4577)) +* compute: fixed perma-diff on `google_compute_disk` for new arm64 images ([#4533](https://github.com/hashicorp/terraform-provider-google-beta/pull/4533)) +* dataflow: fixed bug where permadiff would show on `google_dataflow_job.additional_experiments` ([#4576](https://github.com/hashicorp/terraform-provider-google-beta/pull/4576)) +* storage: fixed a bug in `google_storage_bucket` where `name` was incorrectly validated. 
([#4566](https://github.com/hashicorp/terraform-provider-google-beta/pull/4566)) + +## 4.31.0 (Aug 1, 2022) + +FEATURES: +* **New Resource:** `google_dataplex_zone` ([#4511](https://github.com/hashicorp/terraform-provider-google-beta/pull/4511)) + +IMPROVEMENTS: +* bucket: added support for `matches_prefix` and `matches_suffix` in `condition` of a `lifecycle_rule` in `google_storage_bucket` ([#4527](https://github.com/hashicorp/terraform-provider-google-beta/pull/4527)) +* compute: added `network` and `subnetwork` fields to `google_compute_region_network_endpoint_group` for PSC. ([#4528](https://github.com/hashicorp/terraform-provider-google-beta/pull/4528)) +* container: added field `boot_disk_kms_key` to `auto_provisioning_defaults` in `google_container_cluster` ([#4524](https://github.com/hashicorp/terraform-provider-google-beta/pull/4524)) +* notebooks: added `bootDiskType` support for `PD_EXTREME` in `google_notebooks_instance` ([#4530](https://github.com/hashicorp/terraform-provider-google-beta/pull/4530)) +* notebooks: added `softwareConfig.upgradeable`, `softwareConfig.postStartupScriptBehavior`, `softwareConfig.kernels` in `google_notebooks_runtime` ([#4530](https://github.com/hashicorp/terraform-provider-google-beta/pull/4530)) +* storage: added name validation for `google_storage_bucket` ([#4532](https://github.com/hashicorp/terraform-provider-google-beta/pull/4532)) + +BUG FIXES: +* compute: fixed perma-diff on `google_compute_disk` for new arm64 images ([#4533](https://github.com/hashicorp/terraform-provider-google-beta/pull/4533)) +* dns: fixed a bug where `google_dns_record_set` would create an inconsistent plan when using interpolated values in `rrdatas` ([#4515](https://github.com/hashicorp/terraform-provider-google-beta/pull/4515)) +* kms: fixed setting of resource id post-import for `google_kms_crypto_key` ([#4520](https://github.com/hashicorp/terraform-provider-google-beta/pull/4520)) +* provider: fixed a bug where user-agent was showing "dev" 
rather than the provider version ([#4509](https://github.com/hashicorp/terraform-provider-google-beta/pull/4509)) + +## 4.30.0 (July 25, 2022) + +FEATURES: +* **New Data Source:** `google_service_account_jwt` ([#4489](https://github.com/hashicorp/terraform-provider-google-beta/pull/4489)) +* **New Resource:** `google_certificate_map_entry` ([#4501](https://github.com/hashicorp/terraform-provider-google-beta/pull/4501)) +* **New Resource:** `google_certificate_map` ([#4501](https://github.com/hashicorp/terraform-provider-google-beta/pull/4501)) +* **New Resource:** `google_compute_backend_bucket_iam_binding` ([#4484](https://github.com/hashicorp/terraform-provider-google-beta/pull/4484)) +* **New Resource:** `google_compute_backend_bucket_iam_member` ([#4484](https://github.com/hashicorp/terraform-provider-google-beta/pull/4484)) +* **New Resource:** `google_compute_backend_bucket_iam_policy` ([#4484](https://github.com/hashicorp/terraform-provider-google-beta/pull/4484)) +* **New Resource:** `google_dataproc_metastore_federation` ([#4482](https://github.com/hashicorp/terraform-provider-google-beta/pull/4482)) +* **New Resource:** `google_dataproc_metastore_federation_iam_binding` ([#4482](https://github.com/hashicorp/terraform-provider-google-beta/pull/4482)) +* **New Resource:** `google_dataproc_metastore_federation_iam_member` ([#4482](https://github.com/hashicorp/terraform-provider-google-beta/pull/4482)) +* **New Resource:** `google_dataproc_metastore_federation_iam_policy` ([#4482](https://github.com/hashicorp/terraform-provider-google-beta/pull/4482)) + +IMPROVEMENTS: +* billingbudget: made `thresholdRules` optional in `google_billing_budget` ([#4480](https://github.com/hashicorp/terraform-provider-google-beta/pull/4480)) +* compute: added `instance_termination_action` field to `google_compute_instance_template` resource to support Spot VM termination action ([#4488](https://github.com/hashicorp/terraform-provider-google-beta/pull/4488)) +* compute: added 
`instance_termination_action` field to `google_compute_instance` resource to support Spot VM termination action ([#4488](https://github.com/hashicorp/terraform-provider-google-beta/pull/4488)) +* compute: added `request_coalescing` and `bypass_cache_on_request_headers` fields to `compute_backend_bucket` ([#4484](https://github.com/hashicorp/terraform-provider-google-beta/pull/4484)) +* compute: added field `all_instances_config` to `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` ([#4506](https://github.com/hashicorp/terraform-provider-google-beta/pull/4506)) +* compute: added support for `esp` protocol in `google_compute_packet_mirroring.filters.ip_protocols` ([#4496](https://github.com/hashicorp/terraform-provider-google-beta/pull/4496)) +* monitoring: added `evaluation_missing_data` field to `google_monitoring_alert_policy` ([#4502](https://github.com/hashicorp/terraform-provider-google-beta/pull/4502)) +* notebooks: added field `reserved_ip_range` to `google_notebooks_runtime` ([#4492](https://github.com/hashicorp/terraform-provider-google-beta/pull/4492)) + +BUG FIXES: +* bigtable: fixed an incorrect diff when adding two or more clusters ([#4490](https://github.com/hashicorp/terraform-provider-google-beta/pull/4490)) +* compute: allowed properly updating `adaptive_protection_config` in `compute_security_policy` ([#4478](https://github.com/hashicorp/terraform-provider-google-beta/pull/4478)) +* notebooks: fixed a bug where`google_notebooks_runtime` can't be updated ([#4492](https://github.com/hashicorp/terraform-provider-google-beta/pull/4492)) +* sql: fixed an issue in `google_sql_database_instance` where updates would fail because of the `collation` field ([#4505](https://github.com/hashicorp/terraform-provider-google-beta/pull/4505)) + +## 4.29.0 (July 18, 2022) + +FEATURES: +* **New Resource:** `google_cloudiot_registry_iam_binding` ([#4452](https://github.com/hashicorp/terraform-provider-google-beta/pull/4452)) +* 
**New Resource:** `google_cloudiot_registry_iam_member` ([#4452](https://github.com/hashicorp/terraform-provider-google-beta/pull/4452)) +* **New Resource:** `google_cloudiot_registry_iam_policy` ([#4452](https://github.com/hashicorp/terraform-provider-google-beta/pull/4452)) +* **New Resource:** `google_compute_snapshot_iam_binding` ([#4445](https://github.com/hashicorp/terraform-provider-google-beta/pull/4445)) +* **New Resource:** `google_compute_snapshot_iam_member` ([#4445](https://github.com/hashicorp/terraform-provider-google-beta/pull/4445)) +* **New Resource:** `google_compute_snapshot_iam_policy` ([#4445](https://github.com/hashicorp/terraform-provider-google-beta/pull/4445)) + +IMPROVEMENTS: +* container: added `binauthz_evaluation_mode` field to `resource_container_cluster`. ([#4451](https://github.com/hashicorp/terraform-provider-google-beta/pull/4451)) +* kms: added support for MAC value in `google_kms_crypto_key.purpose` ([#4458](https://github.com/hashicorp/terraform-provider-google-beta/pull/4458)) +* metastore: added `databaseType`, `releaseChannel`, and `hiveMetastoreConfig.endpointProtocol` arguments ([#4443](https://github.com/hashicorp/terraform-provider-google-beta/pull/4443)) + +BUG FIXES: +* bigquery: fixed case-sensitivity for `user_by_email` and `group_by_email` on `google_bigquery_dataset_access` ([#4446](https://github.com/hashicorp/terraform-provider-google-beta/pull/4446)) +* clouddeploy: fixed permadiff on `execution_configs` in `google_clouddeploy_target` resource ([#4450](https://github.com/hashicorp/terraform-provider-google/pull/4450)) +* cloudscheduler: fixed a diff on the last slash of uri on `google_cloud_scheduler_job` ([#4444](https://github.com/hashicorp/terraform-provider-google-beta/pull/4444)) +* compute: fixed force recreation on `provisioned_iops` of `google_compute_disk` ([#4464](https://github.com/hashicorp/terraform-provider-google-beta/pull/4464)) +* compute: fixed missing 
`network_interface.0.ipv6_access_config.0.external_ipv6` output on `google_compute_instance` ([#4470](https://github.com/hashicorp/terraform-provider-google-beta/pull/4470)) +* documentai: fixed a bug where eu region could not be utilized for documentai resources ([#4472](https://github.com/hashicorp/terraform-provider-google-beta/pull/4472)) +* gkehub: fixed a bug where `issuer` can't be updated on `google_gke_hub_membership` ([#4471](https://github.com/hashicorp/terraform-provider-google-beta/pull/4471)) + +## 4.28.0 (July 11, 2022) + +FEATURES: +* **New Resource:** google_bigquery_connection_iam_binding ([#4437](https://github.com/hashicorp/terraform-provider-google-beta/pull/4437)) +* **New Resource:** google_bigquery_connection_iam_member ([#4437](https://github.com/hashicorp/terraform-provider-google-beta/pull/4437)) +* **New Resource:** google_bigquery_connection_iam_policy ([#4437](https://github.com/hashicorp/terraform-provider-google-beta/pull/4437)) +* **New Resource:** google_cloud_tasks_queue_iam_binding ([#4427](https://github.com/hashicorp/terraform-provider-google-beta/pull/4427)) +* **New Resource:** google_cloud_tasks_queue_iam_member ([#4427](https://github.com/hashicorp/terraform-provider-google-beta/pull/4427)) +* **New Resource:** google_cloud_tasks_queue_iam_policy ([#4427](https://github.com/hashicorp/terraform-provider-google-beta/pull/4427)) +* **New Resource:** google_dataproc_autoscaling_policy_iam_binding ([#4441](https://github.com/hashicorp/terraform-provider-google-beta/pull/4441)) +* **New Resource:** google_dataproc_autoscaling_policy_iam_member ([#4441](https://github.com/hashicorp/terraform-provider-google-beta/pull/4441)) +* **New Resource:** google_dataproc_autoscaling_policy_iam_policy ([#4441](https://github.com/hashicorp/terraform-provider-google-beta/pull/4441)) +* **New Resource:** google_dataproc_metastore_service_iam_binding ([#4416](https://github.com/hashicorp/terraform-provider-google-beta/pull/4416)) +* **New 
Resource:** google_dataproc_metastore_service_iam_member ([#4416](https://github.com/hashicorp/terraform-provider-google-beta/pull/4416)) +* **New Resource:** google_dataproc_metastore_service_iam_policy ([#4416](https://github.com/hashicorp/terraform-provider-google-beta/pull/4416)) + +IMPROVEMENTS: +* bigquery: fixed a permadiff in `google_bigquery_job.query.destination_table` ([#4401](https://github.com/hashicorp/terraform-provider-google-beta/pull/4401)) +* billing: added `calendar_period` and `custom_period` fields to `google_billing_budget` ([#4429](https://github.com/hashicorp/terraform-provider-google-beta/pull/4429)) +* cloudsql: added attribute `project` to data source `google_sql_backup_run` ([#4402](https://github.com/hashicorp/terraform-provider-google-beta/pull/4402)) +* composer: added CMEK, PUPI and IP_masq_agent support for Composer 2 in `google_composer_environment` resource ([#4430](https://github.com/hashicorp/terraform-provider-google-beta/pull/4430)) +* compute: added `max_ports_per_vm` field to `google_compute_router_nat` resource ([#4400](https://github.com/hashicorp/terraform-provider-google-beta/pull/4400)) +* compute: added `GCE_VM_IP` support to `google_compute_network_endpoint_group` resource. 
([#4434](https://github.com/hashicorp/terraform-provider-google-beta/pull/4434)) +* privateca: added support to subordinate CA activation ([#4422](https://github.com/hashicorp/terraform-provider-google-beta/pull/4422)) +* redis: added CMEK key field `customer_managed_key` in `google_redis_instance` ([#4435](https://github.com/hashicorp/terraform-provider-google-beta/pull/4435)) +* spanner: added field `version_retention_period` to `google_spanner_database` resource ([#4424](https://github.com/hashicorp/terraform-provider-google-beta/pull/4424)) +* sql: added `settings.location_preference.secondary_zone` field in `google_sql_database_instance` ([#4433](https://github.com/hashicorp/terraform-provider-google-beta/pull/4433)) +* sql: added `sql_server_audit_config` field in `google_sql_database_instance` ([#4403](https://github.com/hashicorp/terraform-provider-google-beta/pull/4403)) + +BUG FIXES: +* composer: fixed a problem with updating Cloud Composer's `scheduler_count` field (https://github.com/hashicorp/terraform-provider-google/issues/11940) ([#4408](https://github.com/hashicorp/terraform-provider-google-beta/pull/4408)) +* composer: fixed permadiff on `private_environment_config.cloud_composer_connection_subnetwork` ([#4411](https://github.com/hashicorp/terraform-provider-google-beta/pull/4411)) +* container: fixed an issue where `node_config.min_cpu_platform` could cause a perma-diff in `google_container_cluster` ([#4426](https://github.com/hashicorp/terraform-provider-google-beta/pull/4426)) +* filestore: fixed a case where `google_filestore_instance.networks.network` would incorrectly see a diff between state and config when the network `id` format was used ([#4431](https://github.com/hashicorp/terraform-provider-google-beta/pull/4431)) +* serviceusage: fixed an issue where `google_project_service_identity` didn't handle service identities without emails correctly ([#4432](https://github.com/hashicorp/terraform-provider-google-beta/pull/4432)) + + +## 
4.27.0 (June 27, 2022) IMPROVEMENTS: * clouddeploy: added `suspend` field to `google_clouddeploy_delivery_pipeline` resource ([#4394](https://github.com/hashicorp/terraform-provider-google-beta/pull/4394)) @@ -598,2913 +753,3 @@ BUG FIXES: * compute: fixed the `google_compute_instance_group_manager.update_policy.0.min_ready_sec` field so that updating it to `0` works ([#3810](https://github.com/hashicorp/terraform-provider-google-beta/pull/3810)) * compute: fixed the `google_compute_region_instance_group_manager.update_policy.0.min_ready_sec` field so that updating it to `0` works ([#3810](https://github.com/hashicorp/terraform-provider-google-beta/pull/3810)) * spanner: fixed the schema for `data.google_spanner_instance` so that non-configurable fields are considered outputs ([#3804](https://github.com/hashicorp/terraform-provider-google-beta/pull/3804)) - -## 3.90.1 (November 02, 2021) - -DEPRECATIONS: - -* container: fixed an overly-broad deprecation on `master_auth`, constraining it to `master_auth.username` and `master_auth.password` - -## 3.90.0 (October 26, 2021) - -DEPRECATIONS: -* container: deprecated `workload_identity_config.0.identity_namespace` and it will be removed in a future major release as it has been deprecated in the API. Use `workload_identity_config.0.workload_pool` instead. Switching your configuration from one value to the other will trigger a diff at plan time, and a spurious update. 
([#3733](https://github.com/hashicorp/terraform-provider-google-beta/pull/3733)) -* container: deprecated the following `google_container_cluster` fields: `instance_group_urls` and `master_auth` ([#3746](https://github.com/hashicorp/terraform-provider-google-beta/pull/3746)) - -IMPROVEMENTS: -* composer: added field `environment_size` to resource `google_composer_environment` ([#3730](https://github.com/hashicorp/terraform-provider-google-beta/pull/3730)) -* container: added `node_config.0.guest_accelerator.0.gpu_partition_size` field to google_container_node_pool ([#3739](https://github.com/hashicorp/terraform-provider-google-beta/pull/3739)) -* container: added `workload_identity_config.0.workload_pool` to `google_container_cluster` ([#3733](https://github.com/hashicorp/terraform-provider-google-beta/pull/3733)) -* container: made `dns_cache_config` conflict with GKE Autopilot mode ([#3725](https://github.com/hashicorp/terraform-provider-google-beta/pull/3725)) -* container_cluster: Updated `monitoring_config` to accept `WORKLOAD` ([#3732](https://github.com/hashicorp/terraform-provider-google-beta/pull/3732)) -* provider: Added links to nested types documentation for manually generated pages ([#3736](https://github.com/hashicorp/terraform-provider-google-beta/pull/3736)) - -BUG FIXES: -* cloudrun: fixed a permadiff on the field `template.spec.containers.ports.name` of the `google_cloud_run_service` resource ([#3740](https://github.com/hashicorp/terraform-provider-google-beta/pull/3740)) -* composer: removed `config.node_config.zone` requirement on `google_composer_environment` ([#3745](https://github.com/hashicorp/terraform-provider-google-beta/pull/3745)) -* compute: fixed permadiff for `failover_policy` on `google_compute_region_backend_service` ([#3728](https://github.com/hashicorp/terraform-provider-google-beta/pull/3728)) -* compute: fixed to make `description` updatable without recreation on `google_compute_instance_group_manager` 
([#3735](https://github.com/hashicorp/terraform-provider-google-beta/pull/3735)) -* container: fixed a permadiff on `google_container_node_pool.workload_metadata_config.mode` ([#3726](https://github.com/hashicorp/terraform-provider-google-beta/pull/3726)) -* iam: fixed request batching bug where failed requests would show unnecessary backslash escaping to the user. ([#3723](https://github.com/hashicorp/terraform-provider-google-beta/pull/3723)) -* securitycenter: fixed bug where `google_scc_notification_config.streaming_config.filter` was not updating. ([#3727](https://github.com/hashicorp/terraform-provider-google-beta/pull/3727)) - -## 3.89.0 (October 18, 2021) - -BUG FIXES: -* compute: fixed bug where `google_compute_router_peer` could not set an advertised route priority of 0, causing permadiff. ([#3718](https://github.com/hashicorp/terraform-provider-google-beta/pull/3718)) -* container: fixed a crash on `monitoring_config` of `google_container_cluster` ([#3717](https://github.com/hashicorp/terraform-provider-google-beta/pull/3717)) -* iam: fixed request batching bug where failed requests would show unnecessary backslash escaping to the user. ([#3723](https://github.com/hashicorp/terraform-provider-google-beta/pull/3723)) -* storage: fixed a bug to better handle eventual consistency among `google_storage_bucket` resources. ([#3715](https://github.com/hashicorp/terraform-provider-google-beta/pull/3715)) - -## 3.88.0 (October 11, 2021) -NOTES: -* reorganized documentation to group all Compute Engine and Monitoring (Stackdriver) resources together. 
([#3686](https://github.com/hashicorp/terraform-provider-google-beta/pull/3686)) - -DEPRECATIONS: -* container: deprecated `workload_metadata_configuration.node_metadata` in favor of `workload_metadata_configuration.mode` in `google_container_cluster` ([#3694](https://github.com/hashicorp/terraform-provider-google-beta/pull/3694)) -* dataproc: deprecated the `google_dataproc_workflow_template.version` field, as it wasn't actually useful. The field is used during updates, but updates aren't currently possible with the resource. ([#3675](https://github.com/hashicorp/terraform-provider-google-beta/pull/3675)) -BREAKING CHANGES: -* gke_hub: made the `config_membership` field in `google_gke_hub_feature` required, disallowing invalid configurations ([#3681](https://github.com/hashicorp/terraform-provider-google-beta/pull/3681)) -* gke_hub: made the `configmanagement`, `feature`, `location`, `membership` fields in `google_gke_hub_feature_membership` required, disallowing invalid configurations ([#3681](https://github.com/hashicorp/terraform-provider-google-beta/pull/3681)) - -FEATURES: -* **New Data Source:** `google_service_networking_peered_dns_domain` ([#3690](https://github.com/hashicorp/terraform-provider-google-beta/pull/3690)) -* **New Data Source:** `google_sourcerepo_repository` ([#3684](https://github.com/hashicorp/terraform-provider-google-beta/pull/3684)) -* **New Data Source:** `google_storage_bucket` ([#3678](https://github.com/hashicorp/terraform-provider-google-beta/pull/3678)) -* **New Resource:** `google_pubsub_lite_reservation` ([#3708](https://github.com/hashicorp/terraform-provider-google-beta/pull/3708)) -* **New Resource:** `google_service_networking_peered_dns_domain` ([#3690](https://github.com/hashicorp/terraform-provider-google-beta/pull/3690)) - -IMPROVEMENTS: -* composer: added field `enable_privately_used_public_ips` to resource `google_composer_environment` (beta) 
([#3697](https://github.com/hashicorp/terraform-provider-google-beta/pull/3697)) -* composer: added field `enable_ip_masq_agent` to resource `google_composer_environment` (beta) ([#3705](https://github.com/hashicorp/terraform-provider-google-beta/pull/3705)) -* composer: added support for composer v2 fields `workloads_config` and `cloud_composer_network_ipv4_cidr_block` to `composer_environment` ([#3709](https://github.com/hashicorp/terraform-provider-google-beta/pull/3709)) -* compute: added NetLB support for Connection Tracking as `connectionTrackingPolicy` in `RegionBackendService`(beta) ([#3698](https://github.com/hashicorp/terraform-provider-google-beta/pull/3698)) -* compute: added external IPv6 support on `google_compute_subnetwork` and `google_compute_instance.network_interfaces` ([#3677](https://github.com/hashicorp/terraform-provider-google-beta/pull/3677)) -* container: added support for `workload_metadata_configuration.mode` in `google_container_cluster` ([#3694](https://github.com/hashicorp/terraform-provider-google-beta/pull/3694)) -* eventarc: added support for `uid` output field, `cloud_function` destination to `google_eventarc_trigger` ([#3681](https://github.com/hashicorp/terraform-provider-google-beta/pull/3681)) -* gke_hub: added support for `gcp_service_account_email` when configuring Git sync in `google_gke_hub_feature_membership` ([#3681](https://github.com/hashicorp/terraform-provider-google-beta/pull/3681)) -* gke_hub: added support for `resource_state`, `state` outputs to `google_gke_hub_feature` ([#3681](https://github.com/hashicorp/terraform-provider-google-beta/pull/3681)) -* pubsub: added support for references to `google_pubsub_lite_reservation` to `google_pubsub_lite_topic`. ([#3708](https://github.com/hashicorp/terraform-provider-google-beta/pull/3708)) - -BUG FIXES: -* monitoring: fixed typo in `google_monitoring_uptime_check_config` where `NOT_MATCHES_REGEX` could not be specified. 
([#3700](https://github.com/hashicorp/terraform-provider-google-beta/pull/3700)) -* servicedirectory: marked `service` on `google_service_directory_endpoint` as ForceNew to trigger recreates on changes ([#3683](https://github.com/hashicorp/terraform-provider-google-beta/pull/3683)) - -## 3.87.0 (October 04, 2021) - -DEPRECATIONS: -* dataproc: deprecated the `google_dataproc_workflow_template.version` field, as it wasn't actually useful. The field is used during updates, but updates aren't currently possible with the resource. ([#3675](https://github.com/hashicorp/terraform-provider-google-beta/pull/3675)) - -FEATURES: -* **New Resource:** `google_monitoring_monitored_project` ([#3658](https://github.com/hashicorp/terraform-provider-google-beta/pull/3658)) -* **New Resource:** `google_org_policy_policy` ([#3637](https://github.com/hashicorp/terraform-provider-google-beta/pull/3637)) - -IMPROVEMENTS: -* cloudbuild: added field `service_account` to `google_cloudbuild_trigger` ([#3661](https://github.com/hashicorp/terraform-provider-google-beta/pull/3661)) -* composer: added field `scheduler_count` to `google_composer_environment` ([#3660](https://github.com/hashicorp/terraform-provider-google-beta/pull/3660)) -* compute: Disabled recreation of GCE instances when updating `resource_policies` property ([#3668](https://github.com/hashicorp/terraform-provider-google-beta/pull/3668)) -* container: added support for `logging_config` and `monitoring_config` to `google_container_cluster` ([#3641](https://github.com/hashicorp/terraform-provider-google-beta/pull/3641)) -* kms: added support for `import_only` to `google_kms_crypto_key` ([#3659](https://github.com/hashicorp/terraform-provider-google-beta/pull/3659)) -* networkservices: boosted the default timeout for `google_network_services_edge_cache_origin` from 30m to 60m ([#3674](https://github.com/hashicorp/terraform-provider-google-beta/pull/3674)) - -BUG FIXES: -* container: fixed an issue where a node pool created with 
error (eg. GKE_STOCKOUT) would not be captured in state ([#3646](https://github.com/hashicorp/terraform-provider-google-beta/pull/3646)) -* filestore: Allowed updating `reserved_ip_range` on `google_filestore_instance` via recreation of the instance ([#3651](https://github.com/hashicorp/terraform-provider-google-beta/pull/3651)) -* serviceusage: Made the service api retry failed operation calls in anticipation of transient errors that occur when first enabling the service. ([#3666](https://github.com/hashicorp/terraform-provider-google-beta/pull/3666)) - -## 3.86.0 (September 27, 2021) - -BUG FIXES: -* dns: fixed an issue in `google_dns_record_set` where `rrdatas` could not be updated ([#3625](https://github.com/hashicorp/terraform-provider-google-beta/pull/3625)) -* dns: fixed an issue in `google_dns_record_set` where creating the resource would result in an 409 error ([#3625](https://github.com/hashicorp/terraform-provider-google-beta/pull/3625)) -* platform: fixed a bug in wrongly writing to state when creation failed on `google_organization_policy` ([#3624](https://github.com/hashicorp/terraform-provider-google-beta/pull/3624)) - -## 3.85.0 (September 20, 2021) -DEPRECATIONS: -* compute: deprecated `interface` field on `google_compute_disk` and `google_compute_region_disk` ([#3611](https://github.com/hashicorp/terraform-provider-google-beta/pull/3611)) - -IMPROVEMENTS: -* bigtable: enabled support for `user_project_override` in `google_bigtable_instance` and `google_bigtable_table` ([#3614](https://github.com/hashicorp/terraform-provider-google-beta/pull/3614)) -* compute: added `iap` fields to `google_compute_region_backend_service` ([#3605](https://github.com/hashicorp/terraform-provider-google-beta/pull/3605)) -* compute: allowed passing an IP address to the `nextHopIlb` field of `google_compute_route` resource ([#3609](https://github.com/hashicorp/terraform-provider-google-beta/pull/3609)) -* container: added field `dns_config` to resource 
`google_container_cluster` ([#3606](https://github.com/hashicorp/terraform-provider-google-beta/pull/3606)) -* iam: added `disabled` field to `google_service_account` resource ([#3603](https://github.com/hashicorp/terraform-provider-google-beta/pull/3603)) -* provider: added links to nested types documentation within a resource ([#3615](https://github.com/hashicorp/terraform-provider-google-beta/pull/3615)) -* storage: added field `path` to `google_storage_transfer_job` ([#3608](https://github.com/hashicorp/terraform-provider-google-beta/pull/3608)) - -BUG FIXES: -* appengine: fixed bug where `deployment.container.image` would update to an old version even if in `ignore_changes` ([#3613](https://github.com/hashicorp/terraform-provider-google-beta/pull/3613)) -* bigquery: fixed a bug where `destination_encryption_config.kms_key_name` stored the version rather than the key name. ([#3616](https://github.com/hashicorp/terraform-provider-google-beta/pull/3616)) -* redis: extended the default timeouts on `google_redis_instance` ([#3604](https://github.com/hashicorp/terraform-provider-google-beta/pull/3604)) -* serviceusage: fixed an issue in `google_project_service` where users could not reenable services that were disabled outside of Terraform. 
([#3607](https://github.com/hashicorp/terraform-provider-google-beta/pull/3607)) - -## 3.84.0 (September 13, 2021) -DEPRECATIONS: -* compute: deprecated `interface` field on `google_compute_disk` and `google_compute_region_disk` ([#3611](https://github.com/hashicorp/terraform-provider-google-beta/pull/3611)) - -FEATURES: -* **New Data Source:** `google_secret_manager_secret` ([#3588](https://github.com/hashicorp/terraform-provider-google-beta/pull/3588)) - -IMPROVEMENTS: -* compute: added update support to `google_compute_service_attachment` ([#3587](https://github.com/hashicorp/terraform-provider-google-beta/pull/3587)) -* filestore: added `connect_mode` to `networks` field in `google_filestore_instance` ([#3595](https://github.com/hashicorp/terraform-provider-google-beta/pull/3595)) - -BUG FIXES: -* container: fixed a bug in failing to remove `maintenance_exclusion` on `google_container_cluster` ([#3600](https://github.com/hashicorp/terraform-provider-google-beta/pull/3600)) -* compute: fixed `advanced_machine_features` error messages in `google_compute_instance` ([#3598](https://github.com/hashicorp/terraform-provider-google-beta/pull/3598)) -* eventarc: fixed bug where resources deleted outside of Terraform would cause errors ([#3590](https://github.com/hashicorp/terraform-provider-google-beta/pull/3590)) -* functions: fixed an error message on `google_cloudfunctions_function` ([#3591](https://github.com/hashicorp/terraform-provider-google-beta/pull/3591)) -* logging: fixed the data type for `bucket_options.linear_buckets.width` on `google_logging_metric` ([#3589](https://github.com/hashicorp/terraform-provider-google-beta/pull/3589)) -* osconfig: fixed import on google_os_config_guest_policies ([#3594](https://github.com/hashicorp/terraform-provider-google-beta/pull/3594)) -* storage: fixed an undetected change on `days_since_noncurrent_time` of `google_storage_bucket` ([#3599](https://github.com/hashicorp/terraform-provider-google-beta/pull/3599)) - - -## 
3.83.0 (September 09, 2021) -FEATURES: -* **New Resource:** `google_privateca_certificate_template` ([#3561](https://github.com/hashicorp/terraform-provider-google-beta/pull/3561)) - -IMPROVEMENTS: -* privateca: added `certificate_template` to `google_privateca_certificate`. ([#3567](https://github.com/hashicorp/terraform-provider-google-beta/pull/3567)) -* compute: allowed setting `ip_address` field of `google_compute_router_peer` ([#3565](https://github.com/hashicorp/terraform-provider-google-beta/pull/3565)) -* dataproc: added field `metastore_config` to `google_dataproc_cluster` ([#3577](https://github.com/hashicorp/terraform-provider-google-beta/pull/3577)) -* kms: added support for `destroy_scheduled_duration` to `google_kms_crypto_key` ([#3563](https://github.com/hashicorp/terraform-provider-google-beta/pull/3563)) - -BUG FIXES: -* endpoints: fixed a timezone discrepancy in `config_id` on `google_endpoints_service` ([#3564](https://github.com/hashicorp/terraform-provider-google-beta/pull/3564)) -* cloudbuild: marked `google_cloudbuild_trigger` as requiring one of branch_name/tag_name/commit_sha within build.source.repo_source ([#3582](https://github.com/hashicorp/terraform-provider-google-beta/pull/3582)) -* compute: fixed a crash on `enable` field of `google_compute_router_peer` ([#3579](https://github.com/hashicorp/terraform-provider-google-beta/pull/3579)) -* compute: fixed a permanent diff for `next_hop_instance_zone` on `google_compute_route` when `next_hop_instance` was set to a self link ([#3571](https://github.com/hashicorp/terraform-provider-google-beta/pull/3571)) -* compute: fixed an issue in `google_compute_router_nat` where removing `log_config` resulted in a perma-diff ([#3581](https://github.com/hashicorp/terraform-provider-google-beta/pull/3581)) -* privateca: fixed a permadiff bug for `publishing_options` on `google_privateca_ca_pool` when both attributes set false 
([#3570](https://github.com/hashicorp/terraform-provider-google-beta/pull/3570)) -* spanner: fixed instance updates to processing units ([#3575](https://github.com/hashicorp/terraform-provider-google-beta/pull/3575)) -* storage: added support for timeouts on `google_storage_bucket_object` ([#3578](https://github.com/hashicorp/terraform-provider-google-beta/pull/3578)) - -## 3.82.0 (August 30, 2021) -FEATURES: -* **New Resource:** `google_privateca_certificate_template` ([#3561](https://github.com/hashicorp/terraform-provider-google-beta/pull/3561)) -* **New Resource:** `google_compute_firewall_policy` ([#3556](https://github.com/hashicorp/terraform-provider-google-beta/pull/3556)) -* **New Resource:** `google_compute_firewall_policy_association` ([#3556](https://github.com/hashicorp/terraform-provider-google-beta/pull/3556)) -* **New Resource:** `google_compute_firewall_policy_rule` ([#3556](https://github.com/hashicorp/terraform-provider-google-beta/pull/3556)) - -IMPROVEMENTS: -* notebooks: added support for `nic_type`, `reservation_affinity` to `google_notebooks_instance` ([#3554](https://github.com/hashicorp/terraform-provider-google-beta/pull/3554)) -* sql: added field `collation` to `google_sql_database_instance` ([#3557](https://github.com/hashicorp/terraform-provider-google-beta/pull/3557)) - -BUG FIXES: -* apigateway: fixed import functionality for all `apigateway` resources ([#3549](https://github.com/hashicorp/terraform-provider-google-beta/pull/3549)) -* compute: fixed a bug when a `source_machine_image` from a different project is used on `google_compute_instance_from_machine_image` ([#3541](https://github.com/hashicorp/terraform-provider-google-beta/pull/3541)) -* dns: fixed not-exists error message on data source `google_dns_managed_zone` ([#3559](https://github.com/hashicorp/terraform-provider-google-beta/pull/3559)) -* healthcare: fixed bug where changes to `google_healthcare_hl7_v2_store.parser_config` subfields would error with 
"...parser_config.version field is immutable..." ([#3560](https://github.com/hashicorp/terraform-provider-google-beta/pull/3560)) -* os_config: fixed imports for `google_os_config_guest_policies` ([#3550](https://github.com/hashicorp/terraform-provider-google-beta/pull/3550)) -* pubsub: added polling to `google_pubsub_schema` to deal with eventually consistent deletes ([#3544](https://github.com/hashicorp/terraform-provider-google-beta/pull/3544)) -* secretmanager: fixed an issue where `replication` fields would not update in `google_secret_manager_secret` ([#3558](https://github.com/hashicorp/terraform-provider-google-beta/pull/3558)) -* service_usage: fixed imports on `google_service_usage_consumer_quota_override` ([#3552](https://github.com/hashicorp/terraform-provider-google-beta/pull/3552)) -* sql: fixed a permadiff bug for `type` when BUILT_IN on `google_sql_user` ([#3545](https://github.com/hashicorp/terraform-provider-google-beta/pull/3545)) -* sql: fixed bug in `google_sql_user` with CLOUD_IAM_USERs on POSTGRES. ([#3542](https://github.com/hashicorp/terraform-provider-google-beta/pull/3542)) - -## 3.81.0 (August 23, 2021) - -IMPROVEMENTS: -* compute: Added `enable` attribute to `google_compute_router_peer` ([#3507](https://github.com/hashicorp/terraform-provider-google-beta/pull/3507)) -* compute: added support for `L3_DEFAULT` as `ip_protocol` for `google_compute_forwarding_rule` and `UNSPECIFIED` as `protocol` for `google_compute_region_backend_service` to support network load balancers that forward all protocols and ports. 
([#3516](https://github.com/hashicorp/terraform-provider-google-beta/pull/3516)) -* compute: added support for `security_settings` to `google_compute_backend_service` ([#3515](https://github.com/hashicorp/terraform-provider-google-beta/pull/3515)) -* gkehub: added `google_gke_hub_membership` support for both `//container.googleapis.com/${google_container_cluster.my-cluster.id}` and `google_container_cluster.my-cluster.id` in `endpoint.0.gke_cluster.0.resource_link` ([#3502](https://github.com/hashicorp/terraform-provider-google-beta/pull/3502)) -* provider: Added provider support for `request_reason` ([#3513](https://github.com/hashicorp/terraform-provider-google-beta/pull/3513)) -* provider: added support for `billing_project` across all resources. If `user_project_override` is set to `true` and a `billing_project` is set, the `X-Goog-User-Project` header will be sent for all resources. ([#3539](https://github.com/hashicorp/terraform-provider-google-beta/pull/3539)) - -BUG FIXES: -* assuredworkloads: enhanced resource deletion so `google_assured_workloads_workload` can delete what it creates ([#3533](https://github.com/hashicorp/terraform-provider-google-beta/pull/3533)) -* bigquery: fixed the permadiff bug on `location` of the `google_bigquery_dataset` ([#3524](https://github.com/hashicorp/terraform-provider-google-beta/pull/3524)) -* composer: fixed environment version regexp to explicitly require . 
(dot) instead of any character after 'preview' (example: composer-2.0.0-preview.0-airflow-2.1.1) ([#3520](https://github.com/hashicorp/terraform-provider-google-beta/pull/3520)) -* compute: changed `wait_for_instances` in `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` to no longer block plan / refresh, waiting on managed instance statuses during apply instead ([#3531](https://github.com/hashicorp/terraform-provider-google-beta/pull/3531)) -* compute: fixed a bug where `negative_caching_policy` cannot be set always revalidate on `google_compute_backend_service` ([#3529](https://github.com/hashicorp/terraform-provider-google-beta/pull/3529)) -* compute: fixed instances where compute resource calls would have their urls appended with a redundant `/projects` after the host ([#3532](https://github.com/hashicorp/terraform-provider-google-beta/pull/3532)) -* firestore: removed diff for server generated field `__name__` on `google_firestore_index` ([#3528](https://github.com/hashicorp/terraform-provider-google-beta/pull/3528)) -* privateca: Fixed null for `ignore_active_certificates_on_deletion` on the imported `google_privateca_certificate_authority` ([#3511](https://github.com/hashicorp/terraform-provider-google-beta/pull/3511)) -* privateca: fixed the creation of subordinate `google_privateca_certificate_authority` with `max_issuer_path_length = 0`. 
([#3540](https://github.com/hashicorp/terraform-provider-google-beta/pull/3540)) - -## 3.80.0 (August 16, 2021) - -FEATURES: -* **New Resource:** `google_dialogflow_cx_environment` ([#3488](https://github.com/hashicorp/terraform-provider-google-beta/pull/3488)) - -IMPROVEMENTS: -* gkehub: added support for both `//container.googleapis.com/${google_container_cluster.my-cluster.id}` and `google_container_cluster.my-cluster.id` references in `google_gke_hub_membership.endpoint.0.gke_cluster.0.resource_link` ([#3502](https://github.com/hashicorp/terraform-provider-google-beta/pull/3502)) -* kms: added `name` field to `google_kms_crypto_key_version` datasource ([#3500](https://github.com/hashicorp/terraform-provider-google-beta/pull/3500)) - -BUG FIXES: -* apigee: fixed update behavior on `google_apigee_envgroup` ([#3489](https://github.com/hashicorp/terraform-provider-google-beta/pull/3489)) -* artifact_registry: transitioned the field `format` to be case insensitive in aligning with backend behavior on `google_artifact_registry_repository` ([#3491](https://github.com/hashicorp/terraform-provider-google-beta/pull/3491)) -* privateca: fixed a failure to create `google_privateca_certificate_authority` of type `SUBORDINATE` due to an invalid attempt to activate it on creation. ([#3499](https://github.com/hashicorp/terraform-provider-google-beta/pull/3499)) - -## 3.79.0 (August 09, 2021) - -NOTES: -* spanner: The `num_nodes` field on `google_spanner_instance` will have its default removed in a future major release, and either `num_nodes` or `processing_units` will be required. 
([#3479](https://github.com/hashicorp/terraform-provider-google-beta/pull/3479)) - -FEATURES: -* **New Resource:** `google_dialogflow_cx_entity_type` ([#3480](https://github.com/hashicorp/terraform-provider-google-beta/pull/3480)) -* **New Resource:** `google_dialogflow_cx_page` ([#3461](https://github.com/hashicorp/terraform-provider-google-beta/pull/3461)) - -IMPROVEMENTS: -* container: added `network_config` block to `google_container_node_pool` resource ([#3472](https://github.com/hashicorp/terraform-provider-google-beta/pull/3472)) -* spanner: added `processing_units` to `google_spanner_instance`. ([#3479](https://github.com/hashicorp/terraform-provider-google-beta/pull/3479)) -* storage: added support for `customer_encryption` on `resource_storage_bucket_object` ([#3469](https://github.com/hashicorp/terraform-provider-google-beta/pull/3469)) - -## 3.78.0 (August 02, 2021) - -IMPROVEMENTS: -* composer: added validation for `max_pods_per_node` field. ([#3445](https://github.com/hashicorp/terraform-provider-google-beta/pull/3445)) -* servicenetworking: added support for `user_project_override` and `billing_project` to `google_service_networking_connection` ([#3455](https://github.com/hashicorp/terraform-provider-google-beta/pull/3455)) - -BUG FIXES: -* storagetransfer: fixed a crash on `azure_blob_storage_data_source` for `google_storage_transfer_job` ([#3447](https://github.com/hashicorp/terraform-provider-google-beta/pull/3447)) -* sql: fixed bug that wouldn't insert the `google_sql_user` in state for iam users. 
([#3442](https://github.com/hashicorp/terraform-provider-google-beta/pull/3442)) -* storage: fixed a crash when `azure_credentials` was defined in `google_storage_transfer_job` ([#3457](https://github.com/hashicorp/terraform-provider-google-beta/pull/3457)) - -## 3.77.0 (July 26, 2021) - -FEATURES: -* **New Resource:** `google_scc_notification_config` ([#3431](https://github.com/hashicorp/terraform-provider-google-beta/pull/3431)) - -IMPROVEMENTS: -* composer: added field `maintenance_window` to resource `google_composer_environment` ([#3435](https://github.com/hashicorp/terraform-provider-google-beta/pull/3435)) -* compute: fixed a permadiff bug in `log_config` field of `google_compute_region_backend_service` ([#3427](https://github.com/hashicorp/terraform-provider-google-beta/pull/3427)) -* dlp: added `crypto_deterministic_config` and `crypto_replace_ffx_fpe_config` as primitive transformation types to `google_data_loss_prevention_deidentify_template` ([#3429](https://github.com/hashicorp/terraform-provider-google-beta/pull/3429)) - -BUG FIXES: -* bigquerydatatransfer: fixed a bug where `destination_dataset_id` was required, it is now optional. ([#3438](https://github.com/hashicorp/terraform-provider-google-beta/pull/3438)) -* billing: Fixed ordering of `budget_filter.projects` on `google_billing_budget` ([#3436](https://github.com/hashicorp/terraform-provider-google-beta/pull/3436)) -* compute: removed default value of `0.8` from `google_backend_service.backend.max_utilization` and it will now default from API. All `max_connections_xxx` and `max_rate_xxx` will also default from API as these are all conditional on balancing mode. ([#3432](https://github.com/hashicorp/terraform-provider-google-beta/pull/3432)) -* sql: fixed bug where the provider would retry on an error if the database instance name couldn't be reused. 
([#3434](https://github.com/hashicorp/terraform-provider-google-beta/pull/3434)) - -## 3.76.0 (July 19, 2021) -FEATURES: -* **New Resource:** `google_assured_workloads_workload` ([#3410](https://github.com/hashicorp/terraform-provider-google-beta/pull/3410)) -* **New Resource:** `google_dialogflow_cx_flow` ([#3422](https://github.com/hashicorp/terraform-provider-google-beta/pull/3422)) -* **New Resource:** `google_dialogflow_cx_intent` ([#3415](https://github.com/hashicorp/terraform-provider-google-beta/pull/3415)) -* **New Resource:** `google_dialogflow_cx_version` ([#3423](https://github.com/hashicorp/terraform-provider-google-beta/pull/3423)) -* **New Resource:** `google_network_services_edge_cache_keyset` ([#3417](https://github.com/hashicorp/terraform-provider-google-beta/pull/3417)) -* **New Resource:** `google_network_services_edge_cache_origin` ([#3417](https://github.com/hashicorp/terraform-provider-google-beta/pull/3417)) -* **New Resource:** `google_network_services_edge_cache_service` ([#3417](https://github.com/hashicorp/terraform-provider-google-beta/pull/3417)) -* **New Resource:** `google_vertex_ai_featurestore_entitytype` ([#3416](https://github.com/hashicorp/terraform-provider-google-beta/pull/3416)) -* **New Resource:** `google_vertex_ai_featurestore` ([#3416](https://github.com/hashicorp/terraform-provider-google-beta/pull/3416)) - -IMPROVEMENTS: -* apigee: Added SLASH_22 support for `peering_cidr_range` on `google_apigee_instance` ([#3424](https://github.com/hashicorp/terraform-provider-google-beta/pull/3424)) -* cloudbuild: Added `pubsub_config` and `webhook_config` parameter to `google_cloudbuild_trigger`. 
([#3418](https://github.com/hashicorp/terraform-provider-google-beta/pull/3418)) - -BUG FIXES: -* pubsub: fixed pubsublite update issues ([#3421](https://github.com/hashicorp/terraform-provider-google-beta/pull/3421)) - -## 3.75.0 (July 12, 2021) - -BREAKING CHANGES: -* privateca: existing beta resources will no longer function ([#3397](https://github.com/hashicorp/terraform-provider-google-beta/pull/3397)) - -FEATURES: -* **New Resource:** google_privateca_ca_pool ([#3397](https://github.com/hashicorp/terraform-provider-google-beta/pull/3397)) -* **New Resource:** google_privateca_certificate ([#3397](https://github.com/hashicorp/terraform-provider-google-beta/pull/3397)) -* **New Resource:** google_privateca_certificate_authority ([#3397](https://github.com/hashicorp/terraform-provider-google-beta/pull/3397)) - -IMPROVEMENTS: -* bigquery: added `kms_key_version` as an output on `bigquery_table.encryption_configuration` and the `destination_encryption_configuration` blocks of `bigquery_job.query`, `bigquery_job.load`, and `bigquery_job.copy`. ([#3406](https://github.com/hashicorp/terraform-provider-google-beta/pull/3406)) -* compute: added `advanced_machine_features` to `google_compute_instance` ([#3392](https://github.com/hashicorp/terraform-provider-google-beta/pull/3392)) -* dlp: Added `replace_with_info_type_config` to `dlp_deidentify_template`. 
([#3384](https://github.com/hashicorp/terraform-provider-google-beta/pull/3384)) -* storage: added `temporary_hold` and `event_based_hold` attributes to `google_storage_bucket_object` ([#3399](https://github.com/hashicorp/terraform-provider-google-beta/pull/3399)) - -BUG FIXES: -* bigquery: Fixed permadiff due to lowercase mode/type in `google_bigquery_table.schema` ([#3405](https://github.com/hashicorp/terraform-provider-google-beta/pull/3405)) -* billing: made `all_updates_rule.*` fields updatable on `google_billing_budget` ([#3394](https://github.com/hashicorp/terraform-provider-google-beta/pull/3394)) -* billing: made `amount.specified_amount.units` updatable on `google_billing_budget` ([#3391](https://github.com/hashicorp/terraform-provider-google-beta/pull/3391)) -* compute: fixed perma-diff in `google_compute_instance` ([#3389](https://github.com/hashicorp/terraform-provider-google-beta/pull/3389)) -* storage: fixed handling of object paths that contain slashes for `google_storage_object_access_control` ([#3407](https://github.com/hashicorp/terraform-provider-google-beta/pull/3407)) - -## 3.74.0 (June 28, 2021) - -FEATURES: -* **New Resource:** `google_app_engine_service_network_settings` ([#3371](https://github.com/hashicorp/terraform-provider-google-beta/pull/3371)) -* **New Resource:** `google_vertex_ai_dataset` ([#3369](https://github.com/hashicorp/terraform-provider-google-beta/pull/3369)) -* **New Resource:** `google_cloudbuild_worker_pool` ([#3372](https://github.com/hashicorp/terraform-provider-google-beta/pull/3372)) - -IMPROVEMENTS: -* bigtable: added `cluster.kms_key_name` field to `google_bigtable_instance` ([#3354](https://github.com/hashicorp/terraform-provider-google-beta/pull/3354)) -* composer: added field `max_pods_per_node` to resource `google_composer_environment` (beta) ([#3376](https://github.com/hashicorp/terraform-provider-google-beta/pull/3376)) -* secretmanager: added `ttl`, `expire_time`, `topics` and `rotation` fields to 
`google_secret_manager_secret` ([#3360](https://github.com/hashicorp/terraform-provider-google-beta/pull/3360)) - -BUG FIXES: -* container: allowed setting `node_config.service_account` at the same time as `enable_autopilot = true` for `google_container_cluster` ([#3361](https://github.com/hashicorp/terraform-provider-google-beta/pull/3361)) -* container: fixed issue where creating a node pool with a name that already exists would import that resource. `google_container_node_pool` ([#3378](https://github.com/hashicorp/terraform-provider-google-beta/pull/3378)) -* dataproc: fixed crash when creating `google_dataproc_workflow_template` with `secondary_worker_config` empty except for `num_instances = 0` ([#3347](https://github.com/hashicorp/terraform-provider-google-beta/pull/3347)) -* filestore: fixed an issue in `google_filestore_instance` where creating two instances simultaneously resulted in an error. ([#3358](https://github.com/hashicorp/terraform-provider-google-beta/pull/3358)) -* iam: fixed an issue in `google_iam_workload_identity_pool_provider` where `aws` and `oidc` were not updatable. 
([#3350](https://github.com/hashicorp/terraform-provider-google-beta/pull/3350)) -* sql: added support for `binary_logging` on replica instances for `google_sql_database_instance` ([#3379](https://github.com/hashicorp/terraform-provider-google-beta/pull/3379)) - -## 3.73.0 (June 21, 2021) -FEATURES: -* **New Resource:** `google_compute_service_attachment` ([#3328](https://github.com/hashicorp/terraform-provider-google-beta/pull/3328)) -* **New Resource:** `google_dialogflow_cx_agent` ([#3324](https://github.com/hashicorp/terraform-provider-google-beta/pull/3324)) -* **New Resource:** `google_gkehub_feature` ([#3330](https://github.com/hashicorp/terraform-provider-google-beta/pull/3330)) -* **New Resource:** `google_gkehub_feature_membership` ([#3330](https://github.com/hashicorp/terraform-provider-google-beta/pull/3330)) - -IMPROVEMENTS: -* provider: added support for [mtls authentication](https://google.aip.dev/auth/4114) ([#3348](https://github.com/hashicorp/terraform-provider-google-beta/pull/3348)) -* compute: added field `adaptive_protection_config` to `google_compute_security_policy` ([#3322](https://github.com/hashicorp/terraform-provider-google-beta/pull/3322)) -* compute: added `advanced_machine_features` fields to `google_compute_instance_template` ([#3337](https://github.com/hashicorp/terraform-provider-google-beta/pull/3337)) -* compute: added a `network_performance_config` block to each of `resource_compute_instance`, `resource_compute_instance_from_template`, and `resource_compute_instance_template` ([#3341](https://github.com/hashicorp/terraform-provider-google-beta/pull/3341)) -* redis: allowed `redis_version` to be upgraded on `google_redis_instance` ([#3344](https://github.com/hashicorp/terraform-provider-google-beta/pull/3344)) - -BUG FIXES: -* apigee: added SLASH_23 support for `peering_cidr_range` on `google_apigee_instance` ([#3327](https://github.com/hashicorp/terraform-provider-google-beta/pull/3327)) -* cloudrun: fixed a bug where plan would 
show a diff on `google_cloud_run_service` if the order of the `template.spec.containers.env` list was re-ordered outside of terraform. ([#3326](https://github.com/hashicorp/terraform-provider-google-beta/pull/3326)) -* container: added `user_project_override` support to the ContainerOperationWaiter used by `google_container_cluster` ([#3345](https://github.com/hashicorp/terraform-provider-google-beta/pull/3345)) - -## 3.72.0 (June 14, 2021) -IMPROVEMENTS: -* container: Allowed specifying a cluster id field for `google_container_node_pool.cluster` to ensure that a node pool is recreated if the associated cluster is recreated. ([#3314](https://github.com/hashicorp/terraform-provider-google-beta/pull/3314)) -* storagetransfer: added support for `azure_blob_storage_data_source` to `google_storage_transfer_job` ([#3316](https://github.com/hashicorp/terraform-provider-google-beta/pull/3316)) - -BUG FIXES: -* bigquery: Fixed `google_bigquery_table.schema` handling of policyTags ([#3307](https://github.com/hashicorp/terraform-provider-google-beta/pull/3307)) -* bigtable: fixed bug that would error if creating multiple bigtable gc policies at the same time ([#3311](https://github.com/hashicorp/terraform-provider-google-beta/pull/3311)) -* compute: fixed bug where `encryption` showed a perma-diff on resources created prior to the feature being released. 
([#3309](https://github.com/hashicorp/terraform-provider-google-beta/pull/3309)) -* dataflow: fixed handling of failed `google_dataflow_flex_template_job` updates ([#3318](https://github.com/hashicorp/terraform-provider-google-beta/pull/3318)) -* dataflow: made `google_dataflow_flex_template_job` updates fail fast if the job is in the process of cancelling or draining([#3317](https://github.com/hashicorp/terraform-provider-google-beta/pull/3317)) - -## 3.71.0 (June 07, 2021) -FEATURES: -* **New Resource:** `google_dialogflow_fulfillment` ([#3286](https://github.com/hashicorp/terraform-provider-google-beta/pull/3286)) - -IMPROVEMENTS: -* compute: added `reservation_affinity` to `google_compute_instance` and `google_compute_instance_template` ([#3288](https://github.com/hashicorp/terraform-provider-google-beta/pull/3288)) -* compute: added support for `wait_for_instances_status` on `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` ([#3283](https://github.com/hashicorp/terraform-provider-google-beta/pull/3283)) -* compute: added support for output-only `status` field on `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` ([#3283](https://github.com/hashicorp/terraform-provider-google-beta/pull/3283)) -* compute: set the default value for log_config.enable on `google_compute_region_health_check` to avoid permanent diff on plan/apply. 
([#3291](https://github.com/hashicorp/terraform-provider-google-beta/pull/3291)) - -BUG FIXES: -* composer: fixed a check that did not allow for preview versions in `google_composer_environment` ([#3287](https://github.com/hashicorp/terraform-provider-google-beta/pull/3287)) -* storage: fixed error when `matches_storage_class` is set empty on `google_storage_bucket` ([#3282](https://github.com/hashicorp/terraform-provider-google-beta/pull/3282)) -* vpcaccess: fixed permadiff when `max_throughput` is not set on `google_vpc_access_connector` ([#3294](https://github.com/hashicorp/terraform-provider-google-beta/pull/3294)) - -## 3.70.0 (June 01, 2021) -IMPROVEMENTS: -* compute: added `provisioned_iops` to `google_compute_disk` ([#3269](https://github.com/hashicorp/terraform-provider-google-beta/pull/3269)) -* sql: added field `disk_autoresize_limit` to `sql_database_instance` ([#3273](https://github.com/hashicorp/terraform-provider-google-beta/pull/3273)) - -BUG FIXES: -* cloudrun: fixed a bug where resources would return successfully due to responses based on a previous version of the resource ([#3277](https://github.com/hashicorp/terraform-provider-google-beta/pull/3277)) -* compute: fixed issue where `google_compute_region_disk` and `google_compute_disk` would force recreation due to the addition of `interface` property ([#3272](https://github.com/hashicorp/terraform-provider-google-beta/pull/3272)) -* compute: fixed missing values for `negative_caching` and `serve_while_stale` on `google_compute_backend_service` ([#3278](https://github.com/hashicorp/terraform-provider-google-beta/pull/3278)) -* storage: fixed error when `matches_storage_class` is set empty on `google_storage_bucket` ([#3282](https://github.com/hashicorp/terraform-provider-google-beta/pull/3282)) - -## 3.69.0 (May 24, 2021) - -IMPROVEMENTS: -* apigateway: allowed field `apiconfig` to change on resource `google_apigateway_gateway` 
([#3248](https://github.com/hashicorp/terraform-provider-google-beta/pull/3248)) -* compute: added "description" field to "google_compute_resource_policy" resource ([#3263](https://github.com/hashicorp/terraform-provider-google-beta/pull/3263)) -* compute: added "instance_schedule_policy" field to "google_compute_resource_policy" resource ([#3263](https://github.com/hashicorp/terraform-provider-google-beta/pull/3263)) -* compute: added support for IPsec-encrypted Interconnect in the form of new fields on `google_compute_router`, `google_compute_ha_vpn_gateway`, `google_compute_interconnect_attachment` and `google_compute_address` ([#3256](https://github.com/hashicorp/terraform-provider-google-beta/pull/3256)) -* dataflow: enabled updates for `google_dataflow_flex_template_job` ([#3246](https://github.com/hashicorp/terraform-provider-google-beta/pull/3246)) - -BUG FIXES: -* cloudidentity: fixed recreation on the `initial_group_config` of `google_cloud_identity_group` ([#3252](https://github.com/hashicorp/terraform-provider-google-beta/pull/3252)) -* compute: added mutex in `google_compute_metadata_item` to reduce retries + quota errors ([#3262](https://github.com/hashicorp/terraform-provider-google-beta/pull/3262)) -* container: fixed bug where `enable_shielded_nodes` could not be false on resource `google_container_cluster` ([#3247](https://github.com/hashicorp/terraform-provider-google-beta/pull/3247)) - -## 3.68.0 (May 18, 2021) -FEATURES: -* **New Resource:** `google_pubsub_schema` ([#3243](https://github.com/hashicorp/terraform-provider-google-beta/pull/3243)) - -IMPROVEMENTS: -* compute: added `initial_size` in resource `google_compute_node_group` to account for scenarios where size may change under the hood ([#3228](https://github.com/hashicorp/terraform-provider-google-beta/pull/3228)) -* compute: added support for setting `kms_key_name` on `google_compute_machine_image` ([#3241](https://github.com/hashicorp/terraform-provider-google-beta/pull/3241)) -* 
dataflow: enabled updates for `google_dataflow_flex_template_job` ([#3246](https://github.com/hashicorp/terraform-provider-google-beta/pull/3246)) - -BUG FIXES: -* compute: Fixed permadiff for `cdn_policy.serve_while_stale` and `cdn_policy.*_ttl` in `google_compute_region_backend_service` (beta) ([#3230](https://github.com/hashicorp/terraform-provider-google-beta/pull/3230)) -* compute: fixed bug where, when an organization security policy association was removed outside of terraform, the next plan/apply would fail. ([#3234](https://github.com/hashicorp/terraform-provider-google-beta/pull/3234)) -* container: added validation to check that both `node_version` and `remove_default_node_pool` cannot be set on `google_container_cluster` ([#3237](https://github.com/hashicorp/terraform-provider-google-beta/pull/3237)) -* dns: suppressed spurious diffs due to case changes in DS records ([#3236](https://github.com/hashicorp/terraform-provider-google-beta/pull/3236)) - -## 3.67.0 (May 10, 2021) -NOTES: -* all: changed default HTTP request timeout from 30 seconds to 120 seconds ([#3181](https://github.com/hashicorp/terraform-provider-google-beta/pull/3181)) -BREAKING CHANGES: -* bigquery: updating `dataset_id` or `project_id` in `google_bigquery_dataset` will now recreate the resource ([#3185](https://github.com/hashicorp/terraform-provider-google-beta/pull/3185)) - -IMPROVEMENTS: -* accesscontextmanager: added support for `require_verified_chrome_os` in basic access levels. 
([#3223](https://github.com/hashicorp/terraform-provider-google-beta/pull/3223)) -* billingbudget: added support for import of `google_billing_budget` ([#3194](https://github.com/hashicorp/terraform-provider-google-beta/pull/3194)) -* cloud_identity: added support for `initial_group_config` to the google_cloud_identity_group resource ([#3211](https://github.com/hashicorp/terraform-provider-google-beta/pull/3211)) -* cloudrun: added support to bind secrets from Secret Manager to environment variables or files to `google_cloud_run_service` ([#3225](https://github.com/hashicorp/terraform-provider-google-beta/pull/3225)) -* compute: added `initial_size` to account for scenarios where size may change under the hood in resource `google_compute_node_group` ([#3228](https://github.com/hashicorp/terraform-provider-google-beta/pull/3228)) -* compute: added `interface` field to `google_compute_region_disk` ([#3193](https://github.com/hashicorp/terraform-provider-google-beta/pull/3193)) -* healthcare: added support for `stream_configs` in `google_healthcare_dicom_store` ([#3190](https://github.com/hashicorp/terraform-provider-google-beta/pull/3190)) -* secretmanager: added support for setting a CMEK on `google_secret_manager_secret` ([#3212](https://github.com/hashicorp/terraform-provider-google-beta/pull/3212)) -* spanner: added `force_destroy` to `google_spanner_instance` to delete instances that have backups enabled. ([#3227](https://github.com/hashicorp/terraform-provider-google-beta/pull/3227)) -* spanner: added support for setting a CMEK on `google_spanner_database` ([#3181](https://github.com/hashicorp/terraform-provider-google-beta/pull/3181)) -* workflows: marked `source_contents` and `service_account` as updatable on `google_workflows_workflow` ([#3205](https://github.com/hashicorp/terraform-provider-google-beta/pull/3205)) - -BUG FIXES: -* bigquery: fixed `dataset_id` to force new resource if name is changed. 
([#3185](https://github.com/hashicorp/terraform-provider-google-beta/pull/3185)) -* cloudrun: fixed permadiff on `google_cloud_run_domain_mapping.metadata.labels` ([#3183](https://github.com/hashicorp/terraform-provider-google-beta/pull/3183)) -* composer: changed `google_composer_environment.master_ipv4_cidr_block` to draw default from the API ([#3204](https://github.com/hashicorp/terraform-provider-google-beta/pull/3204)) -* compute: fixed the failure when `min_required_replicas` is set to 0 on `google_compute_autoscaler` or `google_compute_region_autoscaler` ([#3203](https://github.com/hashicorp/terraform-provider-google-beta/pull/3203)) -* container: fixed container node pool not removed from the state when received 404 error on delete call for the resource `google_container_node_pool` ([#3210](https://github.com/hashicorp/terraform-provider-google-beta/pull/3210)) -* dns: fixed empty `rrdatas` list on `google_dns_record_set` for AAAA records ([#3207](https://github.com/hashicorp/terraform-provider-google-beta/pull/3207)) -* kms: fixed indirectly force replacement via `skip_initial_version_creation` on `google_kms_crypto_key` ([#3192](https://github.com/hashicorp/terraform-provider-google-beta/pull/3192)) -* logging: fixed `metric_descriptor.labels` can't be updated on 'google_logging_metric' ([#3217](https://github.com/hashicorp/terraform-provider-google-beta/pull/3217)) -* pubsub: fixed diff for `minimum_backoff & maximum_backoff` on `google_pubsub_subscription` ([#3214](https://github.com/hashicorp/terraform-provider-google-beta/pull/3214)) -* resourcemanager: fixed broken handling of IAM conditions for `google_organization_iam_member`, `google_organization_iam_binding`, and `google_organization_iam_policy` ([#3213](https://github.com/hashicorp/terraform-provider-google-beta/pull/3213)) -* serviceusage: added `google_project_service.service` validation to reject invalid service domains that don't contain a period 
([#3191](https://github.com/hashicorp/terraform-provider-google-beta/pull/3191)) -* storage: fixed bug where `role_entity` user wouldn't update if the role changed. ([#3199](https://github.com/hashicorp/terraform-provider-google-beta/pull/3199)) - -## 3.66.1 (April 29, 2021) -BUG FIXES: -* compute: fixed bug where terraform would crash if updating from no `service_account.scopes` to more. ([#3208](https://github.com/hashicorp/terraform-provider-google-beta/pull/3208)) - -## 3.66.0 (April 28, 2021) - -NOTES: -* all: changed default HTTP request timeout from 30 seconds to 120 seconds ([#3181](https://github.com/hashicorp/terraform-provider-google-beta/pull/3181)) - -BREAKING CHANGES: -* datacatalog: updating `parent` in `google_data_catalog_tag` will now recreate the resource ([#3179](https://github.com/hashicorp/terraform-provider-google-beta/pull/3179)) - -FEATURES: -* **New Data Source:** `google_compute_ha_vpn_gateway` ([#3173](https://github.com/hashicorp/terraform-provider-google-beta/pull/3173)) -* **New Resource:** `google_dataproc_workflow_template` ([#3178](https://github.com/hashicorp/terraform-provider-google-beta/pull/3178)) - -IMPROVEMENTS: -* bigquery: Added BigTable source format in BigQuery table ([#3165](https://github.com/hashicorp/terraform-provider-google-beta/pull/3165)) -* cloudfunctions: removed bounds on the supported memory range in `google_cloudfunctions_function.available_memory_mb` ([#3171](https://github.com/hashicorp/terraform-provider-google-beta/pull/3171)) -* compute: marked scheduling.0.node_affinities as updatable in `google_compute_instance` ([#3166](https://github.com/hashicorp/terraform-provider-google-beta/pull/3166)) -* dataproc: added `shielded_instance_config` fields to `google_dataproc_cluster` ([#3157](https://github.com/hashicorp/terraform-provider-google-beta/pull/3157)) -* spanner: added support for setting a CMEK on `google_spanner_database` 
([#3181](https://github.com/hashicorp/terraform-provider-google-beta/pull/3181)) - -BUG FIXES: -* compute: fixed error when creating empty `scopes` on `google_compute_instance` ([#3174](https://github.com/hashicorp/terraform-provider-google-beta/pull/3174)) -* container: fixed a bug that allowed specifying `node_config` on `google_container_cluster` when autopilot is used ([#3155](https://github.com/hashicorp/terraform-provider-google-beta/pull/3155)) -* datacatalog: fixed an issue where `parent` in `google_data_catalog_tag` attempted to update the resource when changed instead of recreating it ([#3179](https://github.com/hashicorp/terraform-provider-google-beta/pull/3179)) -* datacatalog: set default false for `force_delete` on `google_data_catalog_tag_template` ([#3164](https://github.com/hashicorp/terraform-provider-google-beta/pull/3164)) -* dns: added missing record types to `google_dns_record_set` resource ([#3160](https://github.com/hashicorp/terraform-provider-google-beta/pull/3160)) -* sql: set `clone.point_in_time` optional for `google_sql_database_instance` ([#3180](https://github.com/hashicorp/terraform-provider-google-beta/pull/3180)) - -## 3.65.0 (April 20, 2021) - -FEATURES: -* **New Data Source:** `google_kms_secret_asymmetric` ([#3141](https://github.com/hashicorp/terraform-provider-google-beta/pull/3141)) - -IMPROVEMENTS: -* compute: added the ability to specify `google_compute_forwarding_rule.ip_address` by a reference in addition to raw IP address ([#3140](https://github.com/hashicorp/terraform-provider-google-beta/pull/3140)) -* compute: enabled fields `advertiseMode`, `advertisedGroups`, `peerAsn`, and `peerIpAddress` to be updatable on resource `google_compute_router_peer` ([#3134](https://github.com/hashicorp/terraform-provider-google-beta/pull/3134)) -* eventarc: added `transport.pubsub.topic` to `google_eventarc_trigger` ([#3149](https://github.com/hashicorp/terraform-provider-google-beta/pull/3149)) - -BUG FIXES: -* cloud_identity: fixed 
google_cloud_identity_group_membership import/update ([#3136](https://github.com/hashicorp/terraform-provider-google-beta/pull/3136)) -* compute: removed minimum for `scopes` field on `google_compute_instance` resource ([#3147](https://github.com/hashicorp/terraform-provider-google-beta/pull/3147)) -* iam: fixed issue with principle and principleSet members not retaining their casing ([#3133](https://github.com/hashicorp/terraform-provider-google-beta/pull/3133)) -* workflows: fixed a bug in `google_workflows_workflow` that could cause inconsistent final plan errors when using the `name` field in other resources ([#3138](https://github.com/hashicorp/terraform-provider-google-beta/pull/3138)) - -## 3.64.0 (April 12, 2021) - -FEATURES: -* **New Resource:** `google_tags_tag_binding` ([#3121](https://github.com/hashicorp/terraform-provider-google-beta/pull/3121)) -* **New Resource:** `google_tags_tag_key_iam_binding` ([#3124](https://github.com/hashicorp/terraform-provider-google-beta/pull/3124)) -* **New Resource:** `google_tags_tag_key_iam_member` ([#3124](https://github.com/hashicorp/terraform-provider-google-beta/pull/3124)) -* **New Resource:** `google_tags_tag_key_iam_policy` ([#3124](https://github.com/hashicorp/terraform-provider-google-beta/pull/3124)) -* **New Resource:** `google_tags_tag_value_iam_binding` ([#3124](https://github.com/hashicorp/terraform-provider-google-beta/pull/3124)) -* **New Resource:** `google_tags_tag_value_iam_member` ([#3124](https://github.com/hashicorp/terraform-provider-google-beta/pull/3124)) -* **New Resource:** `google_tags_tag_value_iam_policy` ([#3124](https://github.com/hashicorp/terraform-provider-google-beta/pull/3124)) -* **New Resource:** `google_apigee_envgroup_attachment` ([#3129](https://github.com/hashicorp/terraform-provider-google-beta/pull/3129)) - -IMPROVEMENTS: -* bigquery: added `require_partition_filter` field to `google_bigquery_table` when provisioning `hive_partitioning_options` 
([#3106](https://github.com/hashicorp/terraform-provider-google-beta/pull/3106)) -* cloudbuild: added new machine types for `google_cloudbuild_trigger` ([#3115](https://github.com/hashicorp/terraform-provider-google-beta/pull/3115)) -* compute: added field `maintenance_window.start_time` to `google_compute_node_group` ([#3125](https://github.com/hashicorp/terraform-provider-google-beta/pull/3125)) -* compute: added gVNIC support for `google_compute_instance_template` ([#3123](https://github.com/hashicorp/terraform-provider-google-beta/pull/3123)) -* datacatalog: added `description` field to `google_data_catalog_tag_template ` resource ([#3128](https://github.com/hashicorp/terraform-provider-google-beta/pull/3128)) -* iam: added support for third party identities via the principle and principleSet IAM members ([#3133](https://github.com/hashicorp/terraform-provider-google-beta/pull/3133)) - -BUG FIXES: -* compute: reverted datatype change for `mtu` in `google_compute_interconnect_attachment` as it was incompatible with existing state representation ([#3112](https://github.com/hashicorp/terraform-provider-google-beta/pull/3112)) -* iam: fixed issue with principle and principleSet members not retaining their casing ([#3133](https://github.com/hashicorp/terraform-provider-google-beta/pull/3133)) -* storage: fixed intermittent `Provider produced inconsistent result after apply` error when creating ([#3107](https://github.com/hashicorp/terraform-provider-google-beta/pull/3107)) - -## 3.63.0 (April 5, 2021) - -FEATURES: -* **New Data Source:** `google_monitoring_istio_canonical_service` ([#3092](https://github.com/hashicorp/terraform-provider-google-beta/pull/3092)) -* **New Resource:** `google_apigee_instance_attachment` ([#3093](https://github.com/hashicorp/terraform-provider-google-beta/pull/3093)) -* **New Resource:** `google_gke_hub_membership` ([#3079](https://github.com/hashicorp/terraform-provider-google-beta/pull/3079)) -* **New Resource:** 
`google_tags_tag_value` ([#3097](https://github.com/hashicorp/terraform-provider-google-beta/pull/3097)) - -IMPROVEMENTS: -* added support for Apple silicon chip (updated to go 1.16) ([#3057](https://github.com/hashicorp/terraform-provider-google-beta/pull/3057)) -* container: - * added support for GKE Autopilot in `google_container_cluster` ([#3101](https://github.com/hashicorp/terraform-provider-google-beta/pull/3101)) - * added `enable_l4_ilb_subsetting` (beta) and `private_ipv6_google_access` fields to `google_container_cluster` ([#3095](https://github.com/hashicorp/terraform-provider-google-beta/pull/3095)) -* sql: changed the default timeout of `google_sql_database_instance` to 30m from 20m ([#3099](https://github.com/hashicorp/terraform-provider-google-beta/pull/3099)) - -BUG FIXES: -* bigquery: fixed issue where you couldn't extend an existing `schema` with additional columns in `google_bigquery_table` ([#3100](https://github.com/hashicorp/terraform-provider-google-beta/pull/3100)) -* cloudidentity: modified `google_cloud_identity_groups` and `google_cloud_identity_group_memberships` to respect the `user_project_override` and `billing_project` configurations and send the appropriate headers to establish a quota project ([#3081](https://github.com/hashicorp/terraform-provider-google-beta/pull/3081)) -* compute: added minimum for `scopes` field to `google_compute_instance` resource ([#3098](https://github.com/hashicorp/terraform-provider-google-beta/pull/3098)) -* notebooks: fixed permadiff on labels for `google_notebook_instance` ([#3096](https://github.com/hashicorp/terraform-provider-google-beta/pull/3096)) -* secretmanager: set required on `secret_data` in `google_secret_manager_secret_version` ([#3094](https://github.com/hashicorp/terraform-provider-google-beta/pull/3094)) - - -## 3.62.0 (March 27, 2021) - -FEATURES: -* **New Data Source:** `google_compute_health_check` ([#3066](https://github.com/hashicorp/terraform-provider-google-beta/pull/3066)) -* 
**New Data Source:** `google_kms_secret_asymmetric` ([#3076](https://github.com/hashicorp/terraform-provider-google-beta/pull/3076)) -* **New Resource:** `google_gke_hub_membership` ([#3079](https://github.com/hashicorp/terraform-provider-google-beta/pull/3079)) -* **New Resource:** `google_tags_tag_key` ([#3062](https://github.com/hashicorp/terraform-provider-google-beta/pull/3062)) -* **New Resource:** `google_data_catalog_tag_template_iam_*` ([#3071](https://github.com/hashicorp/terraform-provider-google-beta/pull/3071)) - -IMPROVEMENTS: -* accesscontextmanager: added support for ingress and egress policies to `google_access_context_manager_service_perimeter` ([#3064](https://github.com/hashicorp/terraform-provider-google-beta/pull/3064)) -* artifactregistry: relaxed field validations for field `format` on `google_artifact_registry_repository` ([#3068](https://github.com/hashicorp/terraform-provider-google-beta/pull/3068)) -* compute: added `proxy_bind` to `google_compute_target_tcp_proxy`, `google_compute_target_http_proxy` and `google_compute_target_https_proxy` ([#3061](https://github.com/hashicorp/terraform-provider-google-beta/pull/3061)) - -BUG FIXES: -* compute: fixed an issue where exceeding the operation rate limit would fail without retrying ([#3077](https://github.com/hashicorp/terraform-provider-google-beta/pull/3077)) -* compute: corrected underlying type to integer for field `mtu` in `google_compute_interconnect_attachment` ([#3075](https://github.com/hashicorp/terraform-provider-google-beta/pull/3075)) - -## 3.61.0 (March 23, 2021) - -IMPROVEMENTS: -* provider: The provider now supports [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation). The federated json credentials must be loaded through the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. 
([#3054](https://github.com/hashicorp/terraform-provider-google-beta/pull/3054)) -* compute: added `proxy_bind` to `google_compute_target_tcp_proxy`, `google_compute_target_http_proxy` and `google_compute_target_https_proxy` ([#3061](https://github.com/hashicorp/terraform-provider-google-beta/pull/3061)) -* compute: changed `google_compute_subnetwork` to accept more values in the `purpose` field ([#3043](https://github.com/hashicorp/terraform-provider-google-beta/pull/3043)) -* dataflow: added `enable_streaming_engine` argument ([#3049](https://github.com/hashicorp/terraform-provider-google-beta/pull/3049)) -* vpcaccess: added `subnet`, `machine_type` beta fields to `google_vpc_access_connector` ([#3042](https://github.com/hashicorp/terraform-provider-google-beta/pull/3042)) - -BUG FIXES: -* bigtable: fixed bug where gc_policy would attempt to recreate the resource when switching from deprecated attribute but maintaining the same underlying value ([#3037](https://github.com/hashicorp/terraform-provider-google-beta/pull/3037)) -* binaryauthorization: fixed permadiff in `google_binary_authorization_attestor` ([#3035](https://github.com/hashicorp/terraform-provider-google-beta/pull/3035)) -* compute: Fixed updates on `export_custom_routes` and `import_custom_routes` in `google_compute_network_peering` ([#3045](https://github.com/hashicorp/terraform-provider-google-beta/pull/3045)) - -## 3.60.0 (March 15, 2021) - -FEATURES: -* **New Resource:** `google_workflows_workflow` ([#2989](https://github.com/hashicorp/terraform-provider-google-beta/pull/2989)) -* **New Resource:** google_apigee_envgroup ([#3039](https://github.com/hashicorp/terraform-provider-google-beta/pull/3039)) -* **New Resource:** google_apigee_environment ([#3020](https://github.com/hashicorp/terraform-provider-google-beta/pull/3020)) -* **New Resource:** google_apigee_instance ([#2986](https://github.com/hashicorp/terraform-provider-google-beta/pull/2986)) - -IMPROVEMENTS: -* cloudrun: 
suppressed metadata.labels["cloud.googleapis.com/location"] value in `google_cloud_run_service` ([#3005](https://github.com/hashicorp/terraform-provider-google-beta/pull/3005)) -* compute: added `mtu` field to `google_compute_interconnect_attachment` ([#3006](https://github.com/hashicorp/terraform-provider-google-beta/pull/3006)) -* compute: added autoscaling_policy.cpu_utilization.predictive_method field to `google_compute_autoscaler` and `google_compute_region_autoscaler` ([#2987](https://github.com/hashicorp/terraform-provider-google-beta/pull/2987)) -* compute: added support for `nic_type` to `google_compute_instance` (GA only) ([#2998](https://github.com/hashicorp/terraform-provider-google-beta/pull/2998)) -* container: added field `ephemeral_storage_config` to resource `google_container_node_pool` and `google_container_cluster` (beta) ([#3023](https://github.com/hashicorp/terraform-provider-google-beta/pull/3023)) -* datafusion: added support for the `DEVELOPER` instance type to `google_data_fusion_instance` ([#3015](https://github.com/hashicorp/terraform-provider-google-beta/pull/3015)) -* monitoring: added windows based availability sli to the resource `google_monitoring_slo` ([#3013](https://github.com/hashicorp/terraform-provider-google-beta/pull/3013)) -* sql: added `settings.0.backup_configuration.transaction_log_retention_days` and `settings.0.backup_configuration.backup_retention_settings` fields to `google_sql_database_instance` ([#3010](https://github.com/hashicorp/terraform-provider-google-beta/pull/3010)) -* storage: added `kms_key_name` to `google_storage_bucket_object` resource ([#3026](https://github.com/hashicorp/terraform-provider-google-beta/pull/3026)) - -BUG FIXES: -* bigquery: fixed materialized view to be recreated when query changes ([#3032](https://github.com/hashicorp/terraform-provider-google-beta/pull/3032)) -* bigtable: fixed bug where gc_policy would attempt to recreate the resource when switching from deprecated attribute 
but maintaining the same underlying value ([#3037](https://github.com/hashicorp/terraform-provider-google-beta/pull/3037)) -* bigtable: required resource recreation if any fields change on `resource_bigtable_gc_policy` ([#2991](https://github.com/hashicorp/terraform-provider-google-beta/pull/2991)) -* binaryauthorization: fixed permadiff in `google_binary_authorization_attestor` ([#3035](https://github.com/hashicorp/terraform-provider-google-beta/pull/3035)) -* cloudfunction: added retry logic for `google_cloudfunctions_function` updates ([#2992](https://github.com/hashicorp/terraform-provider-google-beta/pull/2992)) -* cloudidentity: fixed a bug where `google_cloud_identity_group` would periodically fail with a 403 ([#3012](https://github.com/hashicorp/terraform-provider-google-beta/pull/3012)) -* compute: fixed a perma-diff for `nat_ips` that were specified as short forms in `google_compute_router_nat` ([#3007](https://github.com/hashicorp/terraform-provider-google-beta/pull/3007)) -* compute: fixed perma-diff for cos-family disk images ([#3024](https://github.com/hashicorp/terraform-provider-google-beta/pull/3024)) -* compute: Fixed service account scope alias to be updated. 
([#3021](https://github.com/hashicorp/terraform-provider-google-beta/pull/3021)) -* container: fixed container cluster not removed from the state when received 404 error on delete call for the resource `google_container_cluster` ([#3018](https://github.com/hashicorp/terraform-provider-google-beta/pull/3018)) -* container: Fixed failure in deleting `maintenance_exclusion` for `google_container_cluster` ([#3014](https://github.com/hashicorp/terraform-provider-google-beta/pull/3014)) -* container: fixed an issue where release channel UNSPECIFIED could not be set ([#3019](https://github.com/hashicorp/terraform-provider-google-beta/pull/3019)) -* essentialcontacts: made `language_tag` required for `google_essential_contacts_contact` ([#2994](https://github.com/hashicorp/terraform-provider-google-beta/pull/2994)) -* serviceusage: fixed an issue in `google_service_usage_consumer_quota_override` where setting the `override_value` to 0 would result in a permanent diff ([#2985](https://github.com/hashicorp/terraform-provider-google-beta/pull/2985)) -* serviceusage: fixed an issue in `google_service_usage_consumer_quota_override` where setting the `override_value` to 0 would result in a permanent diff ([#3025](https://github.com/hashicorp/terraform-provider-google-beta/pull/3025)) - -## 3.59.0 (March 08, 2021) - -FEATURES: -* **New Resource:** `google_dataproc_metastore_service` ([#2977](https://github.com/hashicorp/terraform-provider-google-beta/pull/2977)) -* **New Resource:** `google_workflows_workflow` ([#2989](https://github.com/hashicorp/terraform-provider-google-beta/pull/2989)) -* **New Resource:** `google_apigee_instance` ([#2986](https://github.com/hashicorp/terraform-provider-google-beta/pull/2986)) -* **New Resource:** `google_eventarc_trigger` ([#2972](https://github.com/hashicorp/terraform-provider-google-beta/pull/2972)) - -IMPROVEMENTS: -* composer: added `encryption_config` to `google_composer_environment` resource 
([#2967](https://github.com/hashicorp/terraform-provider-google-beta/pull/2967)) -* compute: Added graceful termination to `google_container_node_pool` create calls so that partially created node pools will resume the original operation if the Terraform process is killed mid create. ([#2969](https://github.com/hashicorp/terraform-provider-google-beta/pull/2969)) -* redis : marked `auth_string` on the `resource_redis_instance` resource as sensitive ([#2974](https://github.com/hashicorp/terraform-provider-google-beta/pull/2974)) - -BUG FIXES: -* apigee: fixed IDs when importing `google_apigee_organization` resource ([#2966](https://github.com/hashicorp/terraform-provider-google-beta/pull/2966)) -* artifactregistry: fixed issue where updating `google_artifact_registry_repository` always failed ([#2968](https://github.com/hashicorp/terraform-provider-google-beta/pull/2968)) -* compute : fixed a bug where `guest_flush` could not be set to false for the resource `google_compute_resource_policy` ([#2975](https://github.com/hashicorp/terraform-provider-google-beta/pull/2975)) -* compute: fixed a panic on empty `target_size` in `google_compute_region_instance_group_manager` ([#2979](https://github.com/hashicorp/terraform-provider-google-beta/pull/2979)) -* redis: fixed invalid value error on `auth_string` in `google_redis_instance` ([#2970](https://github.com/hashicorp/terraform-provider-google-beta/pull/2970)) - -## 3.58.0 (February 23, 2021) - -NOTES: -* `google_bigquery_table` resources now cannot be destroyed unless `deletion_protection = false` is set in state for the resource. 
([#2954](https://github.com/hashicorp/terraform-provider-google-beta/pull/2954)) - -FEATURES: -* **New Data Source:** `google_runtimeconfig_variable` ([#2945](https://github.com/hashicorp/terraform-provider-google-beta/pull/2945)) -* **New Data Source:** `google_iap_client` ([#2951](https://github.com/hashicorp/terraform-provider-google-beta/pull/2951)) - -IMPROVEMENTS: -* bigquery: added `deletion_protection` field to `google_bigquery_table` to make deleting them require an explicit intent. ([#2954](https://github.com/hashicorp/terraform-provider-google-beta/pull/2954)) -* cloudrun: updated retry logic to attempt to retry 409 errors from the Cloud Run API, which may be returned intermittently on create. ([#2948](https://github.com/hashicorp/terraform-provider-google-beta/pull/2948)) -* compute: removed max items limit from `google_compute_target_ssl_proxy`. The API currently allows up to 15 Certificates. ([#2964](https://github.com/hashicorp/terraform-provider-google-beta/pull/2964)) -* compute: added support for Private Services Connect for Google APIs in `google_compute_global_address` and `google_compute_global_forwarding_rule` ([#2956](https://github.com/hashicorp/terraform-provider-google-beta/pull/2956)) -* iam: added a retry condition that retries editing `iam_binding` and `iam_member` resources on policies that have frequently deleted service accounts ([#2963](https://github.com/hashicorp/terraform-provider-google-beta/pull/2963)) -* redis: added transit encryption mode support for `google_redis_instance` ([#2955](https://github.com/hashicorp/terraform-provider-google-beta/pull/2955)) -* secretmanager: changed endpoint to use v1 instead of v1beta1 as it is more up-to-date ([#2946](https://github.com/hashicorp/terraform-provider-google-beta/pull/2946)) -* sql: added `insights_config` block to `google_sql_database_instance` resource ([#2944](https://github.com/hashicorp/terraform-provider-google-beta/pull/2944)) - -BUG FIXES: -* compute: fixed an issue where 
the provider could return an error on a successful delete operation ([#2958](https://github.com/hashicorp/terraform-provider-google-beta/pull/2958)) -* datacatalog: fixed import issue for `google_data_catalog_taxonomy` ([#2961](https://github.com/hashicorp/terraform-provider-google-beta/pull/2961)) -* dataproc : fixed `max_failure_per_hour` not sent in API request for the resource `google_dataproc_job` ([#2949](https://github.com/hashicorp/terraform-provider-google-beta/pull/2949)) -* dlp : modified `google_data_loss_prevention_stored_info_type` `regex.group_indexes` field to trigger resource recreation on update ([#2947](https://github.com/hashicorp/terraform-provider-google-beta/pull/2947)) -* sql: fixed diffs based on case for `charset` in `google_sql_database` ([#2957](https://github.com/hashicorp/terraform-provider-google-beta/pull/2957)) - -## 3.57.0 (February 16, 2021) - -DEPRECATIONS: -* compute: deprecated `source_disk_url` field in `google_compute_snapshot`. ([#2939](https://github.com/hashicorp/terraform-provider-google-beta/pull/2939)) -* kms: deprecated `self_link` field in `google_kms_keyring` and `google_kms_cryptokey` resource as it is identical value to `id` field. ([#2939](https://github.com/hashicorp/terraform-provider-google-beta/pull/2939)) -* pubsub: deprecated `path` field in `google_pubsub_subscription` resource as it is identical value to `id` field. 
([#2939](https://github.com/hashicorp/terraform-provider-google-beta/pull/2939)) - -FEATURES: -* **New Resource:** `google_essential_contacts_contact` ([#2943](https://github.com/hashicorp/terraform-provider-google-beta/pull/2943)) -* **New Resource:** `google_privateca_certificate` ([#2924](https://github.com/hashicorp/terraform-provider-google-beta/pull/2924)) - -IMPROVEMENTS: -* bigquery: added `status` field to `google_bigquery_job` ([#2926](https://github.com/hashicorp/terraform-provider-google-beta/pull/2926)) -* compute: added `disk.resource_policies` field to resource `google_compute_instance_template` ([#2929](https://github.com/hashicorp/terraform-provider-google-beta/pull/2929)) -* compute: added `nic_type` field to `google_compute_instance_template ` resource to support gVNIC ([#2941](https://github.com/hashicorp/terraform-provider-google-beta/pull/2941)) -* compute: added `nic_type` field to `google_compute_instance` resource to support gVNIC ([#2941](https://github.com/hashicorp/terraform-provider-google-beta/pull/2941)) -* pubsub: marked `kms_key_name` field in `google_pubsub_topic` as updatable ([#2942](https://github.com/hashicorp/terraform-provider-google-beta/pull/2942)) - -BUG FIXES: -* appengine: added retry for P4SA propagation delay ([#2938](https://github.com/hashicorp/terraform-provider-google-beta/pull/2938)) -* compute: fixed overly-aggressive detection of changes to google_compute_security_policy rules ([#2940](https://github.com/hashicorp/terraform-provider-google-beta/pull/2940)) - -## 3.56.0 (February 8, 2021) - -FEATURES: -* **New Resource:** `google_privateca_certificate` ([#2924](https://github.com/hashicorp/terraform-provider-google-beta/pull/2924)) - -IMPROVEMENTS: -* all: added plan time validations for fields that expect base64 values. 
([#2906](https://github.com/hashicorp/terraform-provider-google-beta/pull/2906)) -* compute: added `disk.resource_policies` field to resource `google_compute_instance_template` ([#2929](https://github.com/hashicorp/terraform-provider-google-beta/pull/2929)) -* sql: added support for point-in-time-recovery to `google_sql_database_instance` ([#2923](https://github.com/hashicorp/terraform-provider-google-beta/pull/2923)) -* monitoring : added `availability` sli metric support for the resource `google_monitoring_slo` ([#2908](https://github.com/hashicorp/terraform-provider-google-beta/pull/2908)) - -BUG FIXES: -* bigquery: fixed bug where you could not reorder columns on `schema` for resource `google_bigquery_table` ([#2913](https://github.com/hashicorp/terraform-provider-google-beta/pull/2913)) -* cloudrun: suppressed `run.googleapis.com/ingress-status` annotation in `google_cloud_run_service` ([#2920](https://github.com/hashicorp/terraform-provider-google-beta/pull/2920)) -* serviceaccount: loosened restrictions on `account_id` for datasource `google_service_account` ([#2917](https://github.com/hashicorp/terraform-provider-google-beta/pull/2917)) - -## 3.55.0 (February 1, 2021) - -BREAKING CHANGES: -* Reverted `* bigquery: made incompatible changes to the `google_bigquery_table.schema` field to cause the resource to be recreated ([#8232](https://github.com/hashicorp/terraform-provider-google/pull/8232))` due to unintended interactions with a bug introduced in an earlier version of the resource. 
- -FEATURES: -* **New Data Source:** `google_runtimeconfig_config` ([#8268](https://github.com/hashicorp/terraform-provider-google/pull/8268)) - -IMPROVEMENTS: -* compute: added `distribution_policy_target_shape` field to `google_compute_region_instance_group_manager` resource ([#8277](https://github.com/hashicorp/terraform-provider-google/pull/8277)) -* container: promoted `master_global_access_config`, `tpu_ipv4_cidr_block`, `default_snat_status` and `datapath_provider` fields of `google_container_cluster` to GA. ([#8303](https://github.com/hashicorp/terraform-provider-google/pull/8303)) -* dataproc: Added field `temp_bucket` to `google_dataproc_cluster` cluster config. ([#8131](https://github.com/hashicorp/terraform-provider-google/pull/8131)) -* notebooks: added `tags`, `service_account_scopes`, `shielded_instance_config` to `google_notebooks_instance` ([#8289](https://github.com/hashicorp/terraform-provider-google/pull/8289)) -* provider: added plan time validations for fields that expect base64 values. ([#8304](https://github.com/hashicorp/terraform-provider-google/pull/8304)) - -BUG FIXES: -* bigquery: fixed permadiff on expiration_ms for `google_bigquery_table` ([#8298](https://github.com/hashicorp/terraform-provider-google/pull/8298)) -* billing: fixed perma-diff on currency_code in `google_billing_budget` ([#8266](https://github.com/hashicorp/terraform-provider-google/pull/8266)) -* compute: changed private_ipv6_google_access in `google_compute_subnetwork` to correctly send a fingerprint ([#8290](https://github.com/hashicorp/terraform-provider-google/pull/8290)) -* healthcare: added retry logic on healthcare dataset not initialized error ([#8256](https://github.com/hashicorp/terraform-provider-google/pull/8256)) - -## 3.54.0 (January 25, 2021) - -KNOWN ISSUES: New `google_bigquery_table` behaviour introduced in this version had unintended consequences, and may incorrectly flag tables for recreation. We expect to revert this for `3.55.0`. 
- -FEATURES: -* **New Data Source:** `google_cloud_run_locations` ([#2864](https://github.com/hashicorp/terraform-provider-google-beta/pull/2864)) -* **New Resource:** `google_privateca_certificate_authority` ([#2877](https://github.com/hashicorp/terraform-provider-google-beta/pull/2877)) -* **New Resource:** `google_privateca_certificate_authority_iam_binding` ([#2883](https://github.com/hashicorp/terraform-provider-google-beta/pull/2883)) -* **New Resource:** `google_privateca_certificate_authority_iam_member` ([#2883](https://github.com/hashicorp/terraform-provider-google-beta/pull/2883)) -* **New Resource:** `google_privateca_certificate_authority_iam_policy` ([#2883](https://github.com/hashicorp/terraform-provider-google-beta/pull/2883)) - -IMPROVEMENTS: -* bigquery: made incompatible changes to the `google_bigquery_table.schema` field cause the resource to be recreated ([#2876](https://github.com/hashicorp/terraform-provider-google-beta/pull/2876)) -* bigtable: fixed an issue where the `google_bigtable_instance` resource was not inferring the zone from the provider. ([#2873](https://github.com/hashicorp/terraform-provider-google-beta/pull/2873)) -* cloudscheduler: fixed unnecessary recreate for `google_cloud_scheduler_job` ([#2882](https://github.com/hashicorp/terraform-provider-google-beta/pull/2882)) -* compute: added `scaling_schedules` fields to `google_compute_autoscaler` and `google_compute_region_autoscaler` (beta) ([#2879](https://github.com/hashicorp/terraform-provider-google-beta/pull/2879)) -* compute: fixed an issue where `google_compute_region_per_instance_config`, `google_compute_per_instance_config`, `google_compute_region_instance_group_manager` resources were not inferring the region/zone from the provider. ([#2874](https://github.com/hashicorp/terraform-provider-google-beta/pull/2874)) -* memcache: fixed an issue where `google_memcached_instance` resource was not inferring the region from the provider. 
([#2863](https://github.com/hashicorp/terraform-provider-google-beta/pull/2863)) -* tpu: fixed an issue where `google_tpu_node` resource was not inferring the zone from the provider. ([#2863](https://github.com/hashicorp/terraform-provider-google-beta/pull/2863)) -* vpcaccess: fixed an issue where `google_vpc_access_connector` resource was not inferring the region from the provider. ([#2863](https://github.com/hashicorp/terraform-provider-google-beta/pull/2863)) - -BUG FIXES: -* bigquery: fixed an issue in `bigquery_dataset_iam_member` where deleted members were not handled correctly ([#2875](https://github.com/hashicorp/terraform-provider-google-beta/pull/2875)) -* compute: fixed a perma-diff on `google_compute_health_check` when `log_config.enable` is set to false ([#2866](https://github.com/hashicorp/terraform-provider-google-beta/pull/2866)) -* notebooks: fixed permadiff on noRemoveDataDisk for `google_notebooks_instance` ([#2880](https://github.com/hashicorp/terraform-provider-google-beta/pull/2880)) -* resourcemanager: fixed an inconsistent result when IAM conditions are specified with `google_folder_iam_*` ([#2878](https://github.com/hashicorp/terraform-provider-google-beta/pull/2878)) -* healthcare: added retry logic on healthcare dataset not initialized error ([#2885](https://github.com/hashicorp/terraform-provider-google-beta/pull/2885)) - -## 3.53.0 (January 19, 2021) - -FEATURES: -* **New Data Source:** `google_compute_instance_template` ([#2842](https://github.com/hashicorp/terraform-provider-google-beta/pull/2842)) -* **New Resource:** `google_apigee_organization` ([#2856](https://github.com/hashicorp/terraform-provider-google-beta/pull/2856)) - -IMPROVEMENTS: -* accesscontextmanager: added support for `google_access_context_manager_gcp_user_access_binding` ([#2851](https://github.com/hashicorp/terraform-provider-google-beta/pull/2851)) -* memcached: fixed an issue where `google_memcached_instance` resource was not inferring the region from the 
provider. ([More info](https://github.com/hashicorp/terraform-provider-google/issues/8027)) -* serviceaccount: added a `keepers` field to `google_service_account_key` that recreates the field when it is modified ([#2860](https://github.com/hashicorp/terraform-provider-google-beta/pull/2860)) -* sql: added restore from backup support to `google_sql_database_instance` ([#2843](https://github.com/hashicorp/terraform-provider-google-beta/pull/2843)) -* sql: added support for MYSQL_8_0 on resource `google_sql_source_representation_instance` ([#2841](https://github.com/hashicorp/terraform-provider-google-beta/pull/2841)) -* tpu: fixed an issue where `google_tpu_node` resource was not inferring the zone from the provider. ([More info](https://github.com/hashicorp/terraform-provider-google/issues/8027)) -* vpcaccess: fixed an issue where `google_vpc_access_connector` resource was not inferring the region from the provider. ([More info](https://github.com/hashicorp/terraform-provider-google/issues/8027)) - -BUG FIXES: -* bigquery: enhanced diff suppress to ignore certain api divergences on resource `table` ([#2840](https://github.com/hashicorp/terraform-provider-google-beta/pull/2840)) -* container: fixed crash due to nil exclusions object when updating an existent cluster with maintenance_policy but without exclusions ([#2839](https://github.com/hashicorp/terraform-provider-google-beta/pull/2839)) -* project: fixed a bug in `google_project_access_approval_settings` where the default `project` was used rather than `project_id` ([#2852](https://github.com/hashicorp/terraform-provider-google-beta/pull/2852)) - -## 3.52.0 (January 11, 2021) - -BREAKING CHANGES: -* billing: removed import support for `google_billing_budget` as it never functioned correctly ([#2789](https://github.com/hashicorp/terraform-provider-google-beta/pull/2789)) - -FEATURES: -* **New Data Source:** `google_sql_backup_run` ([#2824](https://github.com/hashicorp/terraform-provider-google-beta/pull/2824)) -* 
**New Data Source:** `google_storage_bucket_object_content` ([#2785](https://github.com/hashicorp/terraform-provider-google-beta/pull/2785)) -* **New Resource:** `google_billing_subaccount` ([#2788](https://github.com/hashicorp/terraform-provider-google-beta/pull/2788)) -* **New Resource:** `google_pubsub_lite_subscription` ([#2781](https://github.com/hashicorp/terraform-provider-google-beta/pull/2781)) -* **New Resource:** `google_pubsub_lite_topic` ([#2781](https://github.com/hashicorp/terraform-provider-google-beta/pull/2781)) - -IMPROVEMENTS: -* bigtable: added support for specifying `duration` for `bigtable_gc_policy` to allow durations shorter than a day ([#2815](https://github.com/hashicorp/terraform-provider-google-beta/pull/2815)) -* compute: Added support for Google Virtual Network Interface (gVNIC) for `google_compute_image` ([#2779](https://github.com/hashicorp/terraform-provider-google-beta/pull/2779)) -* compute: added SHARED_LOADBALANCER_VIP as a valid option for `google_compute_address.purpose` ([#2773](https://github.com/hashicorp/terraform-provider-google-beta/pull/2773)) -* compute: added field `multiwriter` to resource `disk` (beta) ([#2822](https://github.com/hashicorp/terraform-provider-google-beta/pull/2822)) -* compute: added support for `enable_independent_endpoint_mapping` to `google_compute_router_nat` resource ([#2805](https://github.com/hashicorp/terraform-provider-google-beta/pull/2805)) -* compute: added support for `filter.direction` to `google_compute_packet_mirroring` ([#2825](https://github.com/hashicorp/terraform-provider-google-beta/pull/2825)) -* compute: promoted `confidential_instance_config` field in `google_compute_instance` and `google_compute_instance_template` to GA ([#2818](https://github.com/hashicorp/terraform-provider-google-beta/pull/2818)) -* dataflow: Added optional `kms_key_name` field for `google_dataflow_job` ([#2829](https://github.com/hashicorp/terraform-provider-google-beta/pull/2829)) -* dataflow: added 
documentation about using `parameters` for custom service account and other pipeline options to `google_dataflow_flex_template_job` ([#2776](https://github.com/hashicorp/terraform-provider-google-beta/pull/2776)) -* redis: added `auth_string` output to `google_redis_instance` when `auth_enabled` is `true` ([#2819](https://github.com/hashicorp/terraform-provider-google-beta/pull/2819)) -* sql: added support for setting the `type` field on `google_sql_user` to support IAM authentication ([#2802](https://github.com/hashicorp/terraform-provider-google-beta/pull/2802)) - -BUG FIXES: -* bigquery: fixed a bug in `google_bigquery_connection` that caused the resource to function incorrectly when `connection_id` was unset ([#2792](https://github.com/hashicorp/terraform-provider-google-beta/pull/2792)) -* compute: removed requirement for `google_compute_region_url_map` default_service, as it should be a choice of default_service or default_url_redirect ([#2810](https://github.com/hashicorp/terraform-provider-google-beta/pull/2810)) -* cloud_tasks: fixed permadiff on retry_config.max_retry_duration for `google_cloud_tasks_queue` when the 0s is supplied ([#2812](https://github.com/hashicorp/terraform-provider-google-beta/pull/2812)) -* cloudfunctions: fixed a bug where `google_cloudfunctions_function` would sometimes fail to update after being imported from gcloud ([#2780](https://github.com/hashicorp/terraform-provider-google-beta/pull/2780)) -* cloudrun: fixed a permanent diff on `google_cloud_run_domain_mapping` `spec.force_override` field ([#2791](https://github.com/hashicorp/terraform-provider-google-beta/pull/2791)) -* container: added plan time validation to ensure `enable_private_nodes` is true if `master_ipv4_cidr_block` is set on resource `cluster` ([#2811](https://github.com/hashicorp/terraform-provider-google-beta/pull/2811)) -* container: fixed an issue where setting `google_container_cluster.private_cluster_config[0].master_global_access_config.enabled` to `false` 
caused a permadiff. ([#2816](https://github.com/hashicorp/terraform-provider-google-beta/pull/2816)) -* container: fixed setting kubelet_config to disable cpu_cfs_quota does not seem to work ([#2820](https://github.com/hashicorp/terraform-provider-google-beta/pull/2820)) -* dataproc: updated jobs to no longer wait for job completion during create ([#2809](https://github.com/hashicorp/terraform-provider-google-beta/pull/2809)) -* filestore: updated retry logic to fail fast on quota error which cannot succeed on retry. ([#2814](https://github.com/hashicorp/terraform-provider-google-beta/pull/2814)) -* logging: fixed updating on disabled in `google_logging_project_sink` ([#2821](https://github.com/hashicorp/terraform-provider-google-beta/pull/2821)) -* scheduler: Fixed syntax error in the Cloud Scheduler HTTP target example. ([#2777](https://github.com/hashicorp/terraform-provider-google-beta/pull/2777)) -* sql: fixed a bug in `google_sql_database_instance` that caused a permadiff on `settings.replication_type` ([#2778](https://github.com/hashicorp/terraform-provider-google-beta/pull/2778)) -* storage: updated IAM resources to refresh etag sooner on an IAM conflict error, which will make applications of multiple IAM resources much faster. ([#2814](https://github.com/hashicorp/terraform-provider-google-beta/pull/2814)) - -## 3.51.1 (January 07, 2021) - -BUG FIXES: -* all: fixed a bug that would occur in various resources due to comparison of large integers ([#2826](https://github.com/hashicorp/terraform-provider-google-beta/pull/2826)) - -## 3.51.0 (December 14, 2020) - -FEATURES: -* **New Resource:** `google_firestore_document` ([#2759](https://github.com/hashicorp/terraform-provider-google-beta/pull/2759)) - -IMPROVEMENTS: -* compute: added CDN features to `google_compute_region_backend_service`. ([#2762](https://github.com/hashicorp/terraform-provider-google-beta/pull/2762)) -* compute: added Flexible Cache Control features to `google_compute_backend_service`. 
([#2762](https://github.com/hashicorp/terraform-provider-google-beta/pull/2762)) -* compute: added `replacement_method` field to `update_policy` block of `google_compute_instance_group_manager` ([#2756](https://github.com/hashicorp/terraform-provider-google-beta/pull/2756)) -* compute: added `replacement_method` field to `update_policy` block of `google_compute_region_instance_group_manager` ([#2756](https://github.com/hashicorp/terraform-provider-google-beta/pull/2756)) -* logging: added plan time validation for `unique_writer_identity` on `google_logging_project_sink` ([#2767](https://github.com/hashicorp/terraform-provider-google-beta/pull/2767)) -* storage: added more lifecycle conditions to `google_storage_bucket` resource ([#2761](https://github.com/hashicorp/terraform-provider-google-beta/pull/2761)) - -BUG FIXES: -* all: bump default request timeout to avoid conflicts if creating a resource takes longer than expected ([#2769](https://github.com/hashicorp/terraform-provider-google-beta/pull/2769)) -* project: fixed a bug where `google_project_default_service_accounts` would delete all IAM bindings on a project when run with `action = "DEPRIVILEGE"` ([#2771](https://github.com/hashicorp/terraform-provider-google-beta/pull/2771)) -* spanner: fixed an issue in `google_spanner_database` where multi-statement updates were not formatted correctly ([#2766](https://github.com/hashicorp/terraform-provider-google-beta/pull/2766)) -* sql: fixed a bug in `google_sql_database_instance` that caused a permadiff on `settings.replication_type` ([#2778](https://github.com/hashicorp/terraform-provider-google-beta/pull/2778)) - -## 3.50.0 (December 7, 2020) - -FEATURES: -* **New Data Source:** `google_composer_environment` ([#2745](https://github.com/hashicorp/terraform-provider-google-beta/pull/2745)) -* **New Data Source:** `google_monitoring_cluster_istio_service` ([#2730](https://github.com/hashicorp/terraform-provider-google-beta/pull/2730)) -* **New Data Source:** 
`google_monitoring_mesh_istio_service` ([#2730](https://github.com/hashicorp/terraform-provider-google-beta/pull/2730)) - -IMPROVEMENTS: -* compute: added `replacement_method` field to `update_policy` block of `google_compute_instance_group_manager` ([#2756](https://github.com/hashicorp/terraform-provider-google-beta/pull/2756)) -* compute: added `replacement_method` field to `update_policy` block of `google_compute_region_instance_group_manager` ([#2756](https://github.com/hashicorp/terraform-provider-google-beta/pull/2756)) -* compute: added more fields to cdn_policy block of `google_compute_backend_bucket` ([#2741](https://github.com/hashicorp/terraform-provider-google-beta/pull/2741)) -* compute: updated `google_compute_url_map`'s fields referring to backend services to be able to refer to backend buckets. ([#2754](https://github.com/hashicorp/terraform-provider-google-beta/pull/2754)) -* container: added cluster state check in `resource_container_node_pool` ([#2740](https://github.com/hashicorp/terraform-provider-google-beta/pull/2740)) -* google: added support for more import formats to google_project_iam_custom_role ([#2735](https://github.com/hashicorp/terraform-provider-google-beta/pull/2735)) -* project: added new restore_policy `REVERT_AND_IGNORE_FAILURE` to `google_project_default_service_accounts` ([#2750](https://github.com/hashicorp/terraform-provider-google-beta/pull/2750)) -* serviceusage: Allowed use of field `force` with updates to `google_service_usage_consumer_quota_override` ([#2747](https://github.com/hashicorp/terraform-provider-google-beta/pull/2747)) - -BUG FIXES: -* bigqueryconnection: fixed failure to import a resource if it has a non-default project or location. ([#2746](https://github.com/hashicorp/terraform-provider-google-beta/pull/2746)) -* datacatalog: fixed permadiff on import for tags with a taxonomy set in config. 
([#2744](https://github.com/hashicorp/terraform-provider-google-beta/pull/2744)) -* iam: fixed iam conflict handling so that optimistic-locking retries will succeed more often. ([#2753](https://github.com/hashicorp/terraform-provider-google-beta/pull/2753)) -* storage: fixed an issue in `google_storage_bucket` where `cors` could not be removed ([#2732](https://github.com/hashicorp/terraform-provider-google-beta/pull/2732)) - -## 3.49.0 (November 24, 2020) - -FEATURES: -* **New Resource:** google_healthcare_consent_store ([#2713](https://github.com/hashicorp/terraform-provider-google-beta/pull/2713)) -* **New Resource:** google_healthcare_consent_store_iam_binding ([#2713](https://github.com/hashicorp/terraform-provider-google-beta/pull/2713)) -* **New Resource:** google_healthcare_consent_store_iam_member ([#2713](https://github.com/hashicorp/terraform-provider-google-beta/pull/2713)) -* **New Resource:** google_healthcare_consent_store_iam_policy ([#2713](https://github.com/hashicorp/terraform-provider-google-beta/pull/2713)) - -IMPROVEMENTS: -* bigquery: added `ORC` as a valid option to `source_format` field of `google_bigquery_table` resource ([#2714](https://github.com/hashicorp/terraform-provider-google-beta/pull/2714)) -* compute: added `custom_response_headers` field to `google_compute_backend_service` resource ([#2722](https://github.com/hashicorp/terraform-provider-google-beta/pull/2722)) -* container: added maintenance_exclusions_window to `google_container_cluster` ([#2724](https://github.com/hashicorp/terraform-provider-google-beta/pull/2724)) -* logging: added description and disabled to logging sinks ([#2718](https://github.com/hashicorp/terraform-provider-google-beta/pull/2718)) -* runtimeconfig: marked value and text fields in `google_runtimeconfig_variable` resource as sensitive ([#2717](https://github.com/hashicorp/terraform-provider-google-beta/pull/2717)) -* sql: added `deletion_policy` field to `google_sql_user` to enable abandoning users 
rather than deleting them ([#2719](https://github.com/hashicorp/terraform-provider-google-beta/pull/2719)) - -BUG FIXES: -* bigtable: added ignore_warnings flag to create call for `google_bigtable_app_profile` ([#2716](https://github.com/hashicorp/terraform-provider-google-beta/pull/2716)) - -## 3.48.0 (November 16, 2020) - -FEATURES: -* **New Data Source:** `google_iam_workload_identity_pool_provider` ([#2688](https://github.com/hashicorp/terraform-provider-google-beta/pull/2688)) - -IMPROVEMENTS: -* apigateway: added api_config_id_prefix field to `google_api_gateway_api_config` resource ([#2692](https://github.com/hashicorp/terraform-provider-google-beta/pull/2692)) -* cloudfunctions: fixed a bug with `google_cloudfunction_function` that blocked updates when Organization Policies are enabled. ([#2681](https://github.com/hashicorp/terraform-provider-google-beta/pull/2681)) -* compute: added `autoscaling_policy.0.scale_in_control` fields to `google_compute_autoscaler` ([#2703](https://github.com/hashicorp/terraform-provider-google-beta/pull/2703)) -* compute: added `autoscaling_policy.0.scale_in_control` fields to `google_compute_region_autoscaler` ([#2703](https://github.com/hashicorp/terraform-provider-google-beta/pull/2703)) -* compute: added update support for `google_compute_interconnect_attachment` `bandwidth` field ([#2698](https://github.com/hashicorp/terraform-provider-google-beta/pull/2698)) -* dataproc: added "FLINK", "DOCKER", "HBASE" as valid options for field cluster_config.0.software_config.0.optional_components of `google_dataproc_cluster` resource ([#2683](https://github.com/hashicorp/terraform-provider-google-beta/pull/2683)) - -BUG FIXES: -* cloudrun: added diff suppress function for `google_cloud_run_domain_mapping` `metadata.annotations` to ignore API-set fields ([#2700](https://github.com/hashicorp/terraform-provider-google-beta/pull/2700)) -* compute: fixed an issue in `google_compute_packet_mirroring` where updates would fail due to `network` 
not being updatable ([#2704](https://github.com/hashicorp/terraform-provider-google-beta/pull/2704)) -* datacatalog: fixed an issue in `google_data_catalog_taxonomy` and `google_data_catalog_policy_tag` where importing would fail ([#2694](https://github.com/hashicorp/terraform-provider-google-beta/pull/2694)) -* spanner: marked `google_spanner_instance.config` as ForceNew as it is not updatable ([#2699](https://github.com/hashicorp/terraform-provider-google-beta/pull/2699)) - -## 3.47.0 (November 09, 2020) - -FEATURES: -* **New Data Source:** `google_iam_workload_identity_pool` ([#2663](https://github.com/hashicorp/terraform-provider-google-beta/pull/2663)) -* **New Resource:** `google_iam_workload_identity_pool_provider` ([#2670](https://github.com/hashicorp/terraform-provider-google-beta/pull/2670)) -* **New Resource:** `google_project_default_service_accounts` ([#2668](https://github.com/hashicorp/terraform-provider-google-beta/pull/2668)) - -IMPROVEMENTS: -* cloudfunctions: fixed a bug with `google_cloudfunction_function` that blocked updates when Organization Policies are enabled. ([#2681](https://github.com/hashicorp/terraform-provider-google-beta/pull/2681)) -* functions: added 4096 as a valid value for available_memory_mb field of `google_cloudfunction_function` ([#2666](https://github.com/hashicorp/terraform-provider-google-beta/pull/2666)) -* cloudrun: patched `google_cloud_run_service` to suppress Google generated annotations ([#2679](https://github.com/hashicorp/terraform-provider-google-beta/pull/2679)) - -BUG FIXES: -* dataflow: removed required validation for zone for `google_data_flow_job` when region is given in the config ([#2662](https://github.com/hashicorp/terraform-provider-google-beta/pull/2662)) -* monitoring: Fixed type of `google_monitoring_slo`'s `range` values - some `range` values are doubles, others are integers. 
([#2655](https://github.com/hashicorp/terraform-provider-google-beta/pull/2655)) -* pubsub: Fixed permadiff on push_config.attributes. ([#2672](https://github.com/hashicorp/terraform-provider-google-beta/pull/2672)) -* storage: fixed an issue in `google_storage_bucket` where `lifecycle_rules` were always included in update requests ([#2684](https://github.com/hashicorp/terraform-provider-google-beta/pull/2684)) - -## 3.46.0 (November 02, 2020) - -NOTES: -* compute: updated `google_compute_machine_image` resource to complete once the Image is ready. ([#2637](https://github.com/hashicorp/terraform-provider-google-beta/pull/2637)) - -FEATURES: -* **New Resource:** `google_api_gateway_api_config_iam_binding` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api_config_iam_member` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api_config_iam_policy` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api_config` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api_iam_binding` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api_iam_member` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api_iam_policy` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_api` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_gateway_iam_binding` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_gateway_iam_member` 
([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_gateway_iam_policy` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_api_gateway_gateway` ([#2636](https://github.com/hashicorp/terraform-provider-google-beta/pull/2636)) -* **New Resource:** `google_compute_instance_from_machine_image` ([#2637](https://github.com/hashicorp/terraform-provider-google-beta/pull/2637)) -* **New Resource:** `google_compute_machine_image_iam_binding` ([#2637](https://github.com/hashicorp/terraform-provider-google-beta/pull/2637)) -* **New Resource:** `google_compute_machine_image_iam_member` ([#2637](https://github.com/hashicorp/terraform-provider-google-beta/pull/2637)) -* **New Resource:** `google_compute_machine_image_iam_policy` ([#2637](https://github.com/hashicorp/terraform-provider-google-beta/pull/2637)) -* **New Resource:** `google_iap_tunnel_iam_binding` ([#2642](https://github.com/hashicorp/terraform-provider-google-beta/pull/2642)) -* **New Resource:** `google_iap_tunnel_iam_member` ([#2642](https://github.com/hashicorp/terraform-provider-google-beta/pull/2642)) -* **New Resource:** `google_iap_tunnel_iam_policy` ([#2642](https://github.com/hashicorp/terraform-provider-google-beta/pull/2642)) - -IMPROVEMENTS: -* asset: added conditions to Cloud Asset Feeds ([#2640](https://github.com/hashicorp/terraform-provider-google-beta/pull/2640)) -* bigquery: added `email_preferences ` field to `google_bigquery_data_transfer_config` resource ([#2652](https://github.com/hashicorp/terraform-provider-google-beta/pull/2652)) -* bigquery: added `schedule_options` field to `google_bigquery_data_transfer_config` resource ([#2641](https://github.com/hashicorp/terraform-provider-google-beta/pull/2641)) -* compute: added `private_ipv6_google_access` field to `google_compute_subnetwork` ([#2649](https://github.com/hashicorp/terraform-provider-google-beta/pull/2649)) 
-* compute: added storage_locations & cmek fields to `google_compute_machine_image` resource ([#2637](https://github.com/hashicorp/terraform-provider-google-beta/pull/2637)) -* compute: added support for non-destructive updates to `export_custom_routes` and `import_custom_routes` for `google_compute_network_peering` ([#2633](https://github.com/hashicorp/terraform-provider-google-beta/pull/2633)) -* compute: relaxed `load_balancing_scheme` validation of `google_compute_region_backend_service` to support external network load-balancers ([#2628](https://github.com/hashicorp/terraform-provider-google-beta/pull/2628)) -* container: added `confidential_nodes` field to `google_container_cluster` resource ([#2632](https://github.com/hashicorp/terraform-provider-google-beta/pull/2632)) -* datacatalog: added taxonomy and policy_tag `google_data_catalog` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* dlp: added `custom_info_types` to `google_dlp_inspect_template` ([#2648](https://github.com/hashicorp/terraform-provider-google-beta/pull/2648)) -* functions: added `build_environment_variables` field to `google_cloudfunction_function` ([#2629](https://github.com/hashicorp/terraform-provider-google-beta/pull/2629)) -* kms: added `skip_initial_version_creation` to `google_kms_crypto_key` ([#2645](https://github.com/hashicorp/terraform-provider-google-beta/pull/2645)) -* monitoring: added Monitoring Query Language based alerting for `google_monitoring_alert_policy` ([#2651](https://github.com/hashicorp/terraform-provider-google-beta/pull/2651)) - -BUG FIXES: -* compute: fixed an issue where `google_compute_health_check` `port` values caused a diff when `port_specification` was unset or set to `""` ([#2635](https://github.com/hashicorp/terraform-provider-google-beta/pull/2635)) -* monitoring: added more retries for potential failed monitoring operations ([#2639](https://github.com/hashicorp/terraform-provider-google-beta/pull/2639)) -* osconfig: 
fixed an issue where the `rollout.disruption_budget.percentage` field in `google_os_config_patch_deployment` did not correspond to a field in the API ([#2644](https://github.com/hashicorp/terraform-provider-google-beta/pull/2644)) -* sql: fixed a case in `google_sql_database_instance` where we inadvertently required the `projects.get` permission for a service networking precheck introduced in `v3.44.0` ([#2634](https://github.com/hashicorp/terraform-provider-google-beta/pull/2634)) - -## 3.45.0 (October 28, 2020) - -BREAKING CHANGES: -* pubsub: changing the value of `google_pubsub_subscription.enable_message_ordering` will now recreate the resource. Previously, an error was returned. ([#2624](https://github.com/hashicorp/terraform-provider-google-beta/pull/2624)) -* spanner: `google_spanner_database` resources now cannot be destroyed unless `deletion_protection = false` is set in state for the resource. ([#2612](https://github.com/hashicorp/terraform-provider-google-beta/pull/2612)) - -NOTES: -* compute: added a warning to `google_compute_vpn_gateway` ([#2607](https://github.com/hashicorp/terraform-provider-google-beta/pull/2607)) - -FEATURES: -* **New Data Source:** `google_spanner_instance` ([#2602](https://github.com/hashicorp/terraform-provider-google-beta/pull/2602)) -* **New Resource:** `google_notebooks_instance_iam_binding` ([#2605](https://github.com/hashicorp/terraform-provider-google-beta/pull/2605)) -* **New Resource:** `google_notebooks_instance_iam_member` ([#2605](https://github.com/hashicorp/terraform-provider-google-beta/pull/2605)) -* **New Resource:** `google_notebooks_instance_iam_policy` ([#2605](https://github.com/hashicorp/terraform-provider-google-beta/pull/2605)) -* **New Resource:** `access_context_manager_access_level_condition` ([#2595](https://github.com/hashicorp/terraform-provider-google-beta/pull/2595)) -* **New Resource:** `google_bigquery_routine` ([#2622](https://github.com/hashicorp/terraform-provider-google-beta/pull/2622)) -* 
**New Resource:** `google_iam_workload_identity_pool` ([#2623](https://github.com/hashicorp/terraform-provider-google-beta/pull/2623)) -* **New Resource:** `google_data_catalog_taxonomy` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_policy_tag` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_taxonomy_iam_binding` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_taxonomy_iam_member` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_taxonomy_iam_policy` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_policy_tag_iam_binding` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_policy_tag_iam_member` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) -* **New Resource:** `google_data_catalog_policy_tag_iam_policy` ([#2626](https://github.com/hashicorp/terraform-provider-google-beta/pull/2626)) - -IMPROVEMENTS: -* billing_budget: added `disable_default_iam_recipients` field to `google_billing_budget` to allow disabling sending email notifications to default recipients. 
([#2606](https://github.com/hashicorp/terraform-provider-google-beta/pull/2606)) -* compute: added `interface` attribute to `google_compute_disk` ([#2609](https://github.com/hashicorp/terraform-provider-google-beta/pull/2609)) -* compute: added `mtu` field to `google_compute_network` resource ([#2617](https://github.com/hashicorp/terraform-provider-google-beta/pull/2617)) -* compute: added support for updating `network_interface.[d].network_ip` on `google_compute_instance` when changing network or subnetwork ([#2590](https://github.com/hashicorp/terraform-provider-google-beta/pull/2590)) -* compute: promoted HA VPN fields in `google_compute_vpn_tunnel` to GA ([#2607](https://github.com/hashicorp/terraform-provider-google-beta/pull/2607)) -* compute: promoted `google_compute_external_vpn_gateway` to GA ([#2607](https://github.com/hashicorp/terraform-provider-google-beta/pull/2607)) -* compute: promoted `google_compute_ha_vpn_gateway` to GA ([#2607](https://github.com/hashicorp/terraform-provider-google-beta/pull/2607)) -* provider: added support for service account impersonation. ([#2604](https://github.com/hashicorp/terraform-provider-google-beta/pull/2604)) -* spanner: added `deletion_protection` field to `google_spanner_database` to make deleting them require an explicit intent. 
([#2612](https://github.com/hashicorp/terraform-provider-google-beta/pull/2612)) - -BUG FIXES: -* all: fixed misleading "empty non-retryable error" message that was appearing in debug logs ([#2618](https://github.com/hashicorp/terraform-provider-google-beta/pull/2618)) -* compute: fixed incorrect import format for `google_compute_global_network_endpoint` ([#2594](https://github.com/hashicorp/terraform-provider-google-beta/pull/2594)) -* compute: fixed issue where `google_compute_[region_]backend_service.backend.max_utilization` could not be updated ([#2620](https://github.com/hashicorp/terraform-provider-google-beta/pull/2620)) -* iap: fixed an eventual consistency bug causing creates for `google_iap_brand` to fail ([#2592](https://github.com/hashicorp/terraform-provider-google-beta/pull/2592)) -* provider: fixed an issue where the request headers would grow proportionally to the number of resources in a given `terraform apply` ([#2621](https://github.com/hashicorp/terraform-provider-google-beta/pull/2621)) -* serviceusage: fixed bug where concurrent activations/deactivations of project services would fail, now they retry ([#2591](https://github.com/hashicorp/terraform-provider-google-beta/pull/2591)) - -## 3.44.0 (October 19, 2020) - -BREAKING CHANGE: -* Added `deletion_protection` to `google_sql_database_instance`, which defaults to true. SQL instances can no longer be destroyed without setting `deletion_protection = false`. ([#2579](https://github.com/hashicorp/terraform-provider-google-beta/pull/2579)) - -FEATURES: -* **New Data Source:** `google_app_engine_default_service_account` ([#2568](https://github.com/hashicorp/terraform-provider-google-beta/pull/2568)) -* **New Data Source:** `google_pubsub_topic` ([#2556](https://github.com/hashicorp/terraform-provider-google-beta/pull/2556)) - -IMPROVEMENTS: -* bigquery: added ability for `google_bigquery_dataset_access` to retry quota errors since quota refreshes quickly. 
([#2584](https://github.com/hashicorp/terraform-provider-google-beta/pull/2584)) -* bigquery: added `MONTH` and `YEAR` as allowed values in `google_bigquery_table.time_partitioning.type` ([#2562](https://github.com/hashicorp/terraform-provider-google-beta/pull/2562)) -* cloud_tasks: added `stackdriver_logging_config` field to `cloud_tasks_queue` resource ([#2572](https://github.com/hashicorp/terraform-provider-google-beta/pull/2572)) -* compute: added support for updating `network_interface.[d].network_ip` on `google_compute_instance` when changing network or subnetwork ([#2590](https://github.com/hashicorp/terraform-provider-google-beta/pull/2590)) -* compute: added `maintenance_policy` field to `google_compute_node_group` ([#2586](https://github.com/hashicorp/terraform-provider-google-beta/pull/2586)) -* compute: added filter field to google_compute_image datasource ([#2573](https://github.com/hashicorp/terraform-provider-google-beta/pull/2573)) -* dataproc: Added `graceful_decomissioning_timeout` field to `dataproc_cluster` resource ([#2571](https://github.com/hashicorp/terraform-provider-google-beta/pull/2571)) -* iam: fixed `google_service_account_id_token` datasource to work with User ADCs and Impersonated Credentials ([#2560](https://github.com/hashicorp/terraform-provider-google-beta/pull/2560)) -* logging: Added support for exclusions options for `google_logging_project_sink ` ([#2569](https://github.com/hashicorp/terraform-provider-google-beta/pull/2569)) -* logging: added bucket creation based on custom-id given for the resource `google_logging_project_bucket_config` ([#2575](https://github.com/hashicorp/terraform-provider-google-beta/pull/2575)) -* oslogin: added ability to set a `project` on `google_os_login_ssh_public_key` ([#2583](https://github.com/hashicorp/terraform-provider-google-beta/pull/2583)) -* redis: Added `auth_enabled` field to `google_redis_instance` ([#2570](https://github.com/hashicorp/terraform-provider-google-beta/pull/2570)) -* 
resourcemanager: added a precheck that the serviceusage API is enabled to `google_project` when `auto_create_network` is false, as configuring the GCE API is required in that circumstance ([#2566](https://github.com/hashicorp/terraform-provider-google-beta/pull/2566)) -* sql: added a check to `google_sql_database_instance` to catch failures early by seeing if Service Networking Connections already exists for the private network of the instance. ([#2579](https://github.com/hashicorp/terraform-provider-google-beta/pull/2579)) - -BUG FIXES: -* accessapproval: fixed issue where, due to a recent API change, `google_*_access_approval.enrolled_services.cloud_product` entries specified as a URL would result in a permadiff ([#2565](https://github.com/hashicorp/terraform-provider-google-beta/pull/2565)) -* compute: fixed ability to clear `description` field on `google_compute_health_check` and `google_compute_region_health_check` ([#2580](https://github.com/hashicorp/terraform-provider-google-beta/pull/2580)) -* monitoring: fixed bug where deleting a `google_monitoring_dashboard` would give an "unsupported protocol scheme" error ([#2558](https://github.com/hashicorp/terraform-provider-google-beta/pull/2558)) - -## 3.43.0 (October 12, 2020) - -FEATURES: -* **New Data Source:** `google_pubsub_topic` ([#2556](https://github.com/hashicorp/terraform-provider-google-beta/pull/2556)) -* **New Data Source:** `google_compute_global_forwarding_rule` ([#2548](https://github.com/hashicorp/terraform-provider-google-beta/pull/2548)) -* **New Data Source:** `google_cloud_run_service` ([#2539](https://github.com/hashicorp/terraform-provider-google-beta/pull/2539)) -* **New Resource:** `google_bigtable_table_iam_member` ([#2536](https://github.com/hashicorp/terraform-provider-google-beta/pull/2536)) -* **New Resource:** `google_bigtable_table_iam_binding` ([#2536](https://github.com/hashicorp/terraform-provider-google-beta/pull/2536)) -* **New Resource:** `google_bigtable_table_iam_policy` 
([#2536](https://github.com/hashicorp/terraform-provider-google-beta/pull/2536)) - -IMPROVEMENTS: -* appengine: added ability to manage pre-firestore appengine applications. ([#2533](https://github.com/hashicorp/terraform-provider-google-beta/pull/2533)) -* bigquery: added support for `google_bigquery_table` `materialized_view` field ([#2532](https://github.com/hashicorp/terraform-provider-google-beta/pull/2532)) -* cloudbuild: Added `COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY` support to `google_cloudbuild_trigger.github.pull_request.comment_control` field ([#2552](https://github.com/hashicorp/terraform-provider-google-beta/pull/2552)) -* compute: added additional fields to the `google_compute_forwarding_rule` datasource. ([#2550](https://github.com/hashicorp/terraform-provider-google-beta/pull/2550)) -* dns: added `forwarding_path` field to `google_dns_policy` resource ([#2540](https://github.com/hashicorp/terraform-provider-google-beta/pull/2540)) -* netblock: changed `google_netblock_ip_ranges` to read from cloud.json file rather than DNS record ([#2543](https://github.com/hashicorp/terraform-provider-google-beta/pull/2543)) - -BUG FIXES: -* accessapproval: fixed issue where, due to a recent API change, `google_*_access_approval.enrolled_services.cloud_product` entries specified as a URL would result in a permadiff -* artifactregistry: fixed an issue where `google_artifact_registry_repository` would import an empty state ([#2546](https://github.com/hashicorp/terraform-provider-google-beta/pull/2546)) -* bigquery: fixed an issue in `google_bigquery_job` where non-US locations could not be read ([#2542](https://github.com/hashicorp/terraform-provider-google-beta/pull/2542)) -* cloudrun: fixed an issue in `google_cloud_run_domain_mapping` where labels provided by Google would cause a diff ([#2531](https://github.com/hashicorp/terraform-provider-google-beta/pull/2531)) -* compute: Fixed an issue where `google_compute_region_backend_service` required 
`healthChecks` for a serverless network endpoint group. ([#2547](https://github.com/hashicorp/terraform-provider-google-beta/pull/2547)) -* container: fixed `node_config.image_type` perma-diff when specified in lower case. ([#2538](https://github.com/hashicorp/terraform-provider-google-beta/pull/2538)) -* datacatalog: fixed an error in `google_data_catalog_tag` when trying to set boolean field to `false` ([#2534](https://github.com/hashicorp/terraform-provider-google-beta/pull/2534)) -* monitoring: fixed bug where deleting a `google_monitoring_dashboard` would give an "unsupported protocol scheme" error - -## 3.42.0 (October 05, 2020) - -FEATURES: -* **New Resource:** google_data_loss_prevention_deidentify_template ([#2524](https://github.com/hashicorp/terraform-provider-google-beta/pull/2524)) - -IMPROVEMENTS: -* compute: added support for updating `network_interface.[d].network` and `network_interface.[d].subnetwork` properties on `google_compute_instance`. ([#2517](https://github.com/hashicorp/terraform-provider-google-beta/pull/2517)) -* container: added `notification_config` to `google_container_cluster` ([#2521](https://github.com/hashicorp/terraform-provider-google-beta/pull/2521)) -* dataflow: added `region` field to `google_dataflow_flex_template_job` resource ([#2520](https://github.com/hashicorp/terraform-provider-google-beta/pull/2520)) -* healthcare: added field `parser_config.version` to `google_healthcare_hl7_v2_store` ([#2516](https://github.com/hashicorp/terraform-provider-google-beta/pull/2516)) - -BUG FIXES: -* bigquery: fixed an issue where `google_bigquery_table` would crash while reading an empty schema ([#2518](https://github.com/hashicorp/terraform-provider-google-beta/pull/2518)) -* compute: fixed an issue where `google_compute_instance_template` would throw an error for unspecified `disk_size_gb` values while upgrading the provider. 
([#2515](https://github.com/hashicorp/terraform-provider-google-beta/pull/2515)) -* resourcemanager: fixed an issue in retrieving `google_active_folder` data source when the display name included whitespace ([#2528](https://github.com/hashicorp/terraform-provider-google-beta/pull/2528)) - -## 3.41.0 (September 28, 2020) - -IMPROVEMENTS: -* container: Added support for `datapath_provider` to `google_container_cluster` ([#2492](https://github.com/hashicorp/terraform-provider-google-beta/pull/2492)) -* cloudfunctions: added the ALLOW_INTERNAL_AND_GCLB option to `ingress_settings` of `google_cloudfunctions_function` resource. ([#2493](https://github.com/hashicorp/terraform-provider-google-beta/pull/2493)) -* composer: allowed in-place updates to webserver and database machine type ([#2491](https://github.com/hashicorp/terraform-provider-google-beta/pull/2491)) -* compute: added `SEV_CAPABLE` option to `guestOsFeatures` in `google_compute_image` resource. ([#2503](https://github.com/hashicorp/terraform-provider-google-beta/pull/2503)) -* tpu: added `use_service_networking` to `google_tpu_node` which enables Shared VPC Support. ([#2497](https://github.com/hashicorp/terraform-provider-google-beta/pull/2497)) - -BUG FIXES: -* cloudidentity: Fixed upstream breakage of `google_identity_group`. ([#2507](https://github.com/hashicorp/terraform-provider-google-beta/pull/2507)) - -## 3.40.0 (September 22, 2020) - -DEPRECATIONS: -* bigtable: deprecated `instance_type` for `google_bigtable_instance` - it is now recommended to leave field unspecified. 
([#2477](https://github.com/hashicorp/terraform-provider-google-beta/pull/2477)) - -FEATURES: -* **New Data Source:** `google_compute_region_ssl_certificate` ([#2476](https://github.com/hashicorp/terraform-provider-google-beta/pull/2476)) -* **New Resource:** `google_compute_target_grpc_proxy` ([#2488](https://github.com/hashicorp/terraform-provider-google-beta/pull/2488)) - -IMPROVEMENTS: -* cloudbuild: added `options` and `artifacts` properties to `google_cloudbuild_trigger` ([#2490](https://github.com/hashicorp/terraform-provider-google-beta/pull/2490)) -* compute: added GRPC as a valid value for `google_compute_backend_service.protocol` (and regional equivalent) ([#2478](https://github.com/hashicorp/terraform-provider-google-beta/pull/2478)) -* compute: added 'all' option for `google_compute_firewall` ([#2465](https://github.com/hashicorp/terraform-provider-google-beta/pull/2465)) -* container: added support for `load_balancer_type` to `google_container_cluster` Cloud Run config addon. ([#2487](https://github.com/hashicorp/terraform-provider-google-beta/pull/2487)) -* dataflow: added `transformnameMapping` to `google_dataflow_job` ([#2480](https://github.com/hashicorp/terraform-provider-google-beta/pull/2480)) -* serviceusage: added ability to pass google.project.id to `google_project_service.project` ([#2479](https://github.com/hashicorp/terraform-provider-google-beta/pull/2479)) -* spanner: added schema update/update ddl support for `google_spanner_database` ([#2489](https://github.com/hashicorp/terraform-provider-google-beta/pull/2489)) - -BUG FIXES: -* bigtable: fixed the update behaviour of the `single_cluster_routing` sub-fields in `google_bigtable_app_profile` ([#2482](https://github.com/hashicorp/terraform-provider-google-beta/pull/2482)) -* dataproc: fixed issues where updating `google_dataproc_cluster.cluster_config.autoscaling_policy` would do nothing, and where there was no way to remove a policy. 
([#2483](https://github.com/hashicorp/terraform-provider-google-beta/pull/2483)) -* osconfig: fixed a potential crash in `google_os_config_patch_deployment` due to an unchecked nil value in `recurring_schedule` ([#2481](https://github.com/hashicorp/terraform-provider-google-beta/pull/2481)) -* serviceusage: fixed intermittent failure when a service is already being modified - added retries ([#2469](https://github.com/hashicorp/terraform-provider-google-beta/pull/2469)) -* serviceusage: fixed an issue where `bigquery.googleapis.com` was getting enabled as the `bigquery-json.googleapis.com` alias instead, incorrectly. This had no user impact yet, but the alias may go away in the future. ([#2469](https://github.com/hashicorp/terraform-provider-google-beta/pull/2469)) - -## 3.39.0 (September 15, 2020) - -IMPROVEMENTS: -* compute: added network field to `compute_target_instance` ([#2456](https://github.com/hashicorp/terraform-provider-google-beta/pull/2456)) -* compute: added storage_locations field to `google_compute_snapshot` ([#2461](https://github.com/hashicorp/terraform-provider-google-beta/pull/2461)) -* compute: added `kms_key_service_account`, `kms_key_self_link ` fields to `snapshot_encryption_key` field in `google_compute_snapshot` ([#2461](https://github.com/hashicorp/terraform-provider-google-beta/pull/2461)) -* compute: added `source_disk_encryption_key.kms_key_service_account` field to `google_compute_snapshot` ([#2461](https://github.com/hashicorp/terraform-provider-google-beta/pull/2461)) -* container: Added `self_link` to google_container_cluster ([#2457](https://github.com/hashicorp/terraform-provider-google-beta/pull/2457)) - -BUG FIXES: -* bigquery: fixed a bug when a BigQuery table schema didn't have `name` in the schema. Previously it would panic; now it logs an error. ([#2462](https://github.com/hashicorp/terraform-provider-google-beta/pull/2462)) -* bigquery: fixed bug where updating `clustering` would force a new resource rather than update. 
([#2459](https://github.com/hashicorp/terraform-provider-google-beta/pull/2459)) -* bigquerydatatransfer: fixed `params.secret_access_key` perma-diff for AWS S3 data transfer config types by adding a `sensitive_params` block with the `secret_access_key` attribute. ([#2451](https://github.com/hashicorp/terraform-provider-google-beta/pull/2451)) -* compute: fixed bug where `delete_default_routes_on_create=true` was not actually deleting the default routes on create. ([#2460](https://github.com/hashicorp/terraform-provider-google-beta/pull/2460)) - -## 3.38.0 (September 08, 2020) - -DEPRECATIONS: -* storage: deprecated `bucket_policy_only` field in `google_storage_bucket` in favour of `uniform_bucket_level_access` ([#2442](https://github.com/hashicorp/terraform-provider-google-beta/pull/2442)) - -FEATURES: -* **New Resource:** google_compute_disk_iam_binding ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** google_compute_disk_iam_member ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** google_compute_disk_iam_policy ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** google_compute_region_disk_iam_binding ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** google_compute_region_disk_iam_member ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** google_compute_region_disk_iam_policy ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** google_data_loss_prevention_inspect_template ([#2433](https://github.com/hashicorp/terraform-provider-google-beta/pull/2433)) -* **New Resource:** google_data_loss_prevention_job_trigger ([#2433](https://github.com/hashicorp/terraform-provider-google-beta/pull/2433)) -* **New Resource:** google_data_loss_prevention_stored_info_type 
([#2444](https://github.com/hashicorp/terraform-provider-google-beta/pull/2444)) -* **New Resource:** google_project_service_identity ([#2430](https://github.com/hashicorp/terraform-provider-google-beta/pull/2430)) - -IMPROVEMENTS: -* compute: Added graceful termination to `google_compute_instance_group_manager` create calls so that partially created instance group managers will resume the original operation if the Terraform process is killed mid create. ([#2446](https://github.com/hashicorp/terraform-provider-google-beta/pull/2446)) -* container: added project override support to `google_container_cluster` and `google_container_nodepool` ([#2428](https://github.com/hashicorp/terraform-provider-google-beta/pull/2428)) -* notebooks: added `PD_BALANCED` as a possible disk type for `google_notebooks_instance` ([#2438](https://github.com/hashicorp/terraform-provider-google-beta/pull/2438)) -* osconfig: added rollout field to `google_os_config_patch_deployment` ([#2449](https://github.com/hashicorp/terraform-provider-google-beta/pull/2449)) -* provider: added a new field `billing_project` to the provider that's associated as a billing/quota project with most requests when `user_project_override` is true ([#2427](https://github.com/hashicorp/terraform-provider-google-beta/pull/2427)) -* resourcemanager: added additional fields to `google_projects` datasource ([#2440](https://github.com/hashicorp/terraform-provider-google-beta/pull/2440)) -* serviceusage: added project override support to `google_project_service` ([#2428](https://github.com/hashicorp/terraform-provider-google-beta/pull/2428)) - -BUG FIXES: -* bigquerydatatransfer: fixed `params.secret_access_key` perma-diff for AWS S3 data transfer config types by adding a `sensitive_params` block with the `secret_access_key` attribute. 
([#2451](https://github.com/hashicorp/terraform-provider-google-beta/pull/2451)) -* compute: Fixed bug with `google_netblock_ip_ranges` data source failing to read from the correct URL ([#2448](https://github.com/hashicorp/terraform-provider-google-beta/pull/2448)) -* compute: fixed updating `google_compute_instance.shielded_instance_config` by adding it to the `allow_stopping_for_update` list ([#2436](https://github.com/hashicorp/terraform-provider-google-beta/pull/2436)) -* notebooks: fixed broken `google_notebooks_instance.instance_owners` field by making it a list instead of a string ([#2438](https://github.com/hashicorp/terraform-provider-google-beta/pull/2438)) - -## 3.37.0 (August 31, 2020) -NOTES: -* Drop recommendation to use -provider= on import in documentation ([#2417](https://github.com/hashicorp/terraform-provider-google-beta/pull/2417)) - -FEATURES: -* **New Resource:** `google_compute_image_iam_binding` ([#2410](https://github.com/hashicorp/terraform-provider-google-beta/pull/2410)) -* **New Resource:** `google_compute_image_iam_member` ([#2410](https://github.com/hashicorp/terraform-provider-google-beta/pull/2410)) -* **New Resource:** `google_compute_image_iam_policy` ([#2410](https://github.com/hashicorp/terraform-provider-google-beta/pull/2410)) -* **New Resource:** `google_compute_disk_iam_binding` ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** `google_compute_disk_iam_member` ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** `google_compute_disk_iam_policy` ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** `google_compute_region_disk_iam_binding` ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** `google_compute_region_disk_iam_member` ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) -* **New Resource:** 
`google_compute_region_disk_iam_policy` ([#2424](https://github.com/hashicorp/terraform-provider-google-beta/pull/2424)) - -IMPROVEMENTS: -* appengine: added `vpc_access_connector` field to `google_app_engine_standard_app_version` resource ([#2405](https://github.com/hashicorp/terraform-provider-google-beta/pull/2405)) -* bigquery: added `notification_pubsub_topic` field to `google_bigquery_data_transfer_config` resource ([#2411](https://github.com/hashicorp/terraform-provider-google-beta/pull/2411)) -* composer: added `database_config` and `web_server_config` to `google_composer_environment` resource ([#2419](https://github.com/hashicorp/terraform-provider-google-beta/pull/2419)) -* compute: Added custom metadata fields and filter expressions to `google_compute_subnetwork` flow log configuration ([#2416](https://github.com/hashicorp/terraform-provider-google-beta/pull/2416)) -* compute: Added support to `google_compute_backend_service` for setting a serverless regional network endpoint group as `backend.group` ([#2408](https://github.com/hashicorp/terraform-provider-google-beta/pull/2408)) -* compute: added support for pd-balanced disk type for `google_compute_instance` ([#2421](https://github.com/hashicorp/terraform-provider-google-beta/pull/2421)) -* container: added support for `kubelet_config` and `linux_node_config` to GKE node pools ([#2279](https://github.com/hashicorp/terraform-provider-google-beta/pull/2279), [#2403](https://github.com/hashicorp/terraform-provider-google-beta/pull/2403)) -* container: added support for pd-balanced disk type for `google_container_node_pool` ([#2421](https://github.com/hashicorp/terraform-provider-google-beta/pull/2421)) -* memcached: added discovery_endpoint to `resource_memcached_instance` ([#2414](https://github.com/hashicorp/terraform-provider-google-beta/pull/2414)) -* pubsub: added `retry_policy` to `google_pubsub_subscription` resource ([#2412](https://github.com/hashicorp/terraform-provider-google-beta/pull/2412)) - 
-BUG FIXES: -* compute: fixed an issue where `google_compute_url_map` `path_matcher.default_route_action` would conflict with `default_url_redirect` ([#2406](https://github.com/hashicorp/terraform-provider-google-beta/pull/2406)) -* kms: updated `data_source_secret_manager_secret_version` to have consistent id value ([#2415](https://github.com/hashicorp/terraform-provider-google-beta/pull/2415)) - -## 3.36.0 (August 24, 2020) - -FEATURES: -* **New Resource:** `google_active_directory_domain_trust` ([#2401](https://github.com/hashicorp/terraform-provider-google-beta/pull/2401)) -* **New Resource:** `google_access_context_manager_service_perimeters` ([#2382](https://github.com/hashicorp/terraform-provider-google-beta/pull/2382)) -* **New Resource:** `google_access_context_manager_access_levels` ([#2382](https://github.com/hashicorp/terraform-provider-google-beta/pull/2382)) -* **New Resource:** `google_folder_access_approval_settings` ([#2373](https://github.com/hashicorp/terraform-provider-google-beta/pull/2373)) -* **New Resource:** `google_organization_access_approval_settings` ([#2373](https://github.com/hashicorp/terraform-provider-google-beta/pull/2373)) -* **New Resource:** `google_project_access_approval_settings` ([#2373](https://github.com/hashicorp/terraform-provider-google-beta/pull/2373)) -* **New Resource:** `google_bigquery_table_iam_policy` ([#2392](https://github.com/hashicorp/terraform-provider-google-beta/pull/2392)) -* **New Resource:** `google_bigquery_table_iam_binding` ([#2392](https://github.com/hashicorp/terraform-provider-google-beta/pull/2392)) -* **New Resource:** `google_bigquery_table_iam_member` ([#2392](https://github.com/hashicorp/terraform-provider-google-beta/pull/2392)) - -IMPROVEMENTS: -* billing: added `last_period_amount` field to `google_billing_budget` to allow setting budget amount automatically to the last billing period's spend. 
([#2378](https://github.com/hashicorp/terraform-provider-google-beta/pull/2378)) -* compute: added confidential_instance_config block to google_compute_instance ([#2369](https://github.com/hashicorp/terraform-provider-google-beta/pull/2369)) -* compute: added confidential_instance_config block to google_compute_instance_template ([#2369](https://github.com/hashicorp/terraform-provider-google-beta/pull/2369)) -* compute: added grpc_health_check block to compute_health_check ([#2389](https://github.com/hashicorp/terraform-provider-google-beta/pull/2389)) -* compute: added grpc_health_check block to compute_region_health_check ([#2389](https://github.com/hashicorp/terraform-provider-google-beta/pull/2389)) -* pubsub: added `enable_message_ordering` support to `google_pubsub_subscription` ([#2390](https://github.com/hashicorp/terraform-provider-google-beta/pull/2390)) -* sql: added project field to `google_sql_database_instance` datasource. ([#2370](https://github.com/hashicorp/terraform-provider-google-beta/pull/2370)) -* storage: added `ARCHIVE` as an accepted class for `google_storage_bucket` and `google_storage_bucket_object` ([#2385](https://github.com/hashicorp/terraform-provider-google-beta/pull/2385)) - -BUG FIXES: -* all: updated base urls for compute, dns, storage, and bigquery APIs to their recommended endpoints ([#2396](https://github.com/hashicorp/terraform-provider-google-beta/pull/2396)) -* bigquery: fixed a bug where `dataset_access.iam_member` would produce inconsistent results after apply. ([#2397](https://github.com/hashicorp/terraform-provider-google-beta/pull/2397)) -* bigquery: fixed an issue with `use_legacy_sql` not being set to `false`. 
([#2375](https://github.com/hashicorp/terraform-provider-google-beta/pull/2375)) -* cloudidentity: fixed a bug with importing `google_cloud_identity_group` and `google_cloud_identity_group_membership` ([#2379](https://github.com/hashicorp/terraform-provider-google-beta/pull/2379)) -* cloudidentity: fixed cloud identity datasources to handle pagination ([#2387](https://github.com/hashicorp/terraform-provider-google-beta/pull/2387)) -* compute: set the default value for log_config.enable on `google_compute_health_check` to avoid permanent diff on plan/apply. ([#2399](https://github.com/hashicorp/terraform-provider-google-beta/pull/2399)) -* dns: fixed an issue where `google_dns_managed_zone` would not remove `private_visibility_config` on updates ([#2380](https://github.com/hashicorp/terraform-provider-google-beta/pull/2380)) -* sql: fixed an issue where `google_sql_database_instance` would throw an error when removing `private_network`. Removing `private_network` now recreates the resource. ([#2400](https://github.com/hashicorp/terraform-provider-google-beta/pull/2400)) - -## 3.35.0 (August 17, 2020) -NOTES: -* all: Updated lists of enums to display the enum options in the documentation pages. ([#2340](https://github.com/hashicorp/terraform-provider-google-beta/pull/2340)) - -FEATURES: -* **New Resource:** `google_compute_region_network_endpoint_group` (supports serverless NEGs) ([#2348](https://github.com/hashicorp/terraform-provider-google-beta/pull/2348)) - -IMPROVEMENTS: -* appengine: converted `google_app_engine_standard_app_version`'s `inbound_services` to an enum array, which enhances docs and provides some client-side validation. ([#2344](https://github.com/hashicorp/terraform-provider-google-beta/pull/2344)) -* billing_budget: Added support for `monitoring_notification_channels` to allow sending budget notifications to Cloud Monitoring email notification channels. 
([#2366](https://github.com/hashicorp/terraform-provider-google-beta/pull/2366)) -* cloudbuild: added tags, source, queue_ttl, logs_bucket, substitutions, and secrets to `google_cloudbuild_trigger` ([#2335](https://github.com/hashicorp/terraform-provider-google-beta/pull/2335)) -* cloudfunctions: Updated the `google_cloudfunctions_function` datasource to include new fields available in the API. ([#2334](https://github.com/hashicorp/terraform-provider-google-beta/pull/2334)) -* compute: added `source_image` and `source_snapshot` to `google_compute_image` ([#2356](https://github.com/hashicorp/terraform-provider-google-beta/pull/2356)) -* compute: added confidential_instance_config block to google_compute_instance ([#2369](https://github.com/hashicorp/terraform-provider-google-beta/pull/2369)) -* compute: added confidential_instance_config block to google_compute_instance_template ([#2369](https://github.com/hashicorp/terraform-provider-google-beta/pull/2369)) -* iam: Added `public_key_type` field to `google_service_account_key` ([#2368](https://github.com/hashicorp/terraform-provider-google-beta/pull/2368)) -* memcached: added memcacheVersion input and memcacheNodes output field to `google_memcache_instance` ([#2336](https://github.com/hashicorp/terraform-provider-google-beta/pull/2336)) -* pubsub: added `filter` field to `google_pubsub_subscription` resource ([#2367](https://github.com/hashicorp/terraform-provider-google-beta/pull/2367)) -* resource-manager: updated documentation for `folder_iam_*` and `organization_iam_*` resources. 
([#2365](https://github.com/hashicorp/terraform-provider-google-beta/pull/2365)) -* sql: added support for point_in_time_recovery for `google_sql_database_instance` ([#2338](https://github.com/hashicorp/terraform-provider-google-beta/pull/2338)) - -BUG FIXES: -* appengine: Set `iap` to computed in `google_app_engine_application` ([#2342](https://github.com/hashicorp/terraform-provider-google-beta/pull/2342)) -* artifactrepository: Fixed import failure of `google_artifact_registry_repository`. ([#2345](https://github.com/hashicorp/terraform-provider-google-beta/pull/2345)) -* compute: fixed shielded instance config, which had been failing to apply due to a field rename on the GCP side. ([#2337](https://github.com/hashicorp/terraform-provider-google-beta/pull/2337)) -* monitoring: fixed validation rules for `google_monitoring_slo` `windows_based_sli.metric_sum_in_range.max` field ([#2354](https://github.com/hashicorp/terraform-provider-google-beta/pull/2354)) -* osconfig: fixed `google_os_config_patch_deployment` `windows_update.classifications` field to work correctly, accepting multiple values. 
([#2340](https://github.com/hashicorp/terraform-provider-google-beta/pull/2340)) - -## 3.34.0 (August 11, 2020) -NOTES: -* redis: explicitly noted in `google_redis_instance` documentation that `"REDIS_5_0"` is supported ([#2323](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2323)) -* all: fix markdown formatting while showing enum values in documentation ([#2327](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2327)) - -FEATURES: -* **New Resource:** `google_compute_organization_security_policy_association` ([#2333](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2333)) -* **New Resource:** `google_compute_organization_security_policy_rule` ([#2333](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2333)) -* **New Resource:** `google_compute_organization_security_policy` ([#2333](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2333)) - -IMPROVEMENTS: -* bigtable: added support for labels in `google_bigtable_instance` ([#2325](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2325)) -* cloudfunctions: updated the `google_cloudfunctions_function` datasource to include new fields available in the API. 
([#2334](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2334)) -* compute: masked automatically applied GKE Sandbox node labels and taints on node pools ([#2320](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2320)) -* redis: added `persistence_iam_identity` output field to `google_redis_instance` ([#2323](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2323)) -* storage: added output-only `media_link` to `google_storage_bucket_object` ([#2331](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2331)) - -BUG FIXES: -* compute: fixed issue where the `project` field in `data.google_compute_network_endpoint_group` was returning an error when specified ([#2324](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2324)) -* notebook: fixed bug where not setting `data_disk_type` or `disk_encryption` would cause a diff on the next plan ([#2332](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2332)) -* sourcerepo: fixed perma-diff in `google_sourcerepo_repository` ([#2316](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2316)) -* all: fixed crash due to nil context when loading credentials ([#2321](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2321)) - -## 3.33.0 (August 04, 2020) - -DEPRECATIONS: -* compute: deprecated `enable_logging` on `google_compute_firewall`, define `log_config.metadata` to enable logging instead. 
([#2310](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2310)) - -FEATURES: -* **New Resource:** `google_active_directory_domain` ([#2309](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2309)) -* **New Resource:** `google_dataflow_flex_template_job` ([#2303](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2303)) - -IMPROVEMENTS: -* cloudrun: added `ports` field to `google_cloud_run_service` `templates.spec.containers` ([#2311](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2311)) -* compute: added `log_config.metadata` to `google_compute_firewall`, defining this will enable logging. ([#2310](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2310)) - -BUG FIXES: -* container: Fixed a crash in `google_container_cluster` when `""` was specified for `resource_usage_export_config.bigquery_destination.dataset_id`. ([#2296](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2296)) -* endpoints: Fixed a crash when `google_endpoints_service` is used on a machine without timezone data ([#2302](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2302)) -* resourcemanager: bumped `google_project` timeout defaults to 10 minutes (from 4) ([#2306](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2306)) - -## 3.32.0 (July 27, 2020) -FEATURES: -* **New Data Source:** `google_sql_database_instance` #2841 ([#2273](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2273)) -* **New Resource:** `google_cloud_asset_folder_feed` ([#2284](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2284)) -* **New Resource:** `google_cloud_asset_organization_feed` ([#2284](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2284)) -* **New Resource:** `google_cloud_asset_project_feed` 
([#2284](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2284)) -* **New Resource:** `google_monitoring_metric_descriptor` ([#2290](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2290)) -* **New Resource:** `google_os_config_guest_policies` ([#2276](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2276)) - -IMPROVEMENTS: -* cluster: Added `default_snat_status` field for `google_container_cluster` resource. ([#2283](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2283)) -* filestore: Added `nfs_export_options` field on `google_filestore_instance.file_shares`. ([#2289](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2289)) -* filestore: Added support for filestore high scale tier. ([#2289](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2289)) -* resourcemanager: Added `folder_id` as computed attribute to `google_folder` resource and datasource. ([#2287](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2287)) -* compute: Added support to `google_compute_backend_service` for setting a network endpoint group as `backend.group`. ([#2304](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2304)) - -BUG FIXES: -* container: Fixed `google_container_cluster.pod_security_policy_config` not being set when disabled. -* container: Fixed a crash in `google_container_cluster` when `""` was specified for `resource_usage_export_config.bigquery_destination.dataset_id`. 
([#2296](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2296)) -* bigquery: Fixed bug where a permadiff would show up when adding a column to the middle of a `bigquery_table.schema` ([#2275](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2275)) -* notebook: Fixed bug where many fields were being written as empty to state, causing a diff on the next plan ([#2288](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2288)) -* notebook: Fixed bug where setting `network` or `subnet` to a full URL would succeed, but cause a diff on the next plan ([#2288](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2288)) -* notebook: Fixed bug where updating certain fields would result in a no-op update call instead of a create/destroy. Now, the only field that is updatable in place is `labels` ([#2288](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2288)) - -## 3.31.0 (July 20, 2020) -FEATURES: -* **New Data Source:** `google_service_account_id_token` ([#2269](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2269)) -* **New Resource:** `google_cloudiot_device` ([#2266](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2266)) - -IMPROVEMENTS: -* bigquery: added support for BigQuery custom schemas for external data using CSV / NDJSON ([#2264](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2264)) -* datafusion: changed `version` field to be settable in `google_data_fusion_instance` resource ([#2268](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2268)) - -BUG FIXES: -* container: fixed a bug where `useIpAlias` was not defaulting to true inside the `ip_allocation_policy` block ([#2260](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2260)) -* memcache: fixed field `memcache_parameters` to work correctly on 
`google_memcache_instance` ([#2261](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2261)) - -## 3.30.0 (July 13, 2020) -FEATURES: -* **New Data Source:** `google_game_services_game_server_deployment_rollout` ([#2258](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2258)) -* **New Resource:** `google_os_config_patch_deployment` ([#2253](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2253)) - -IMPROVEMENTS: -* artifactregistry: Added field `kms_key_name` to `google_artifact_registry_repository` ([#2254](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2254)) - -BUG FIXES: -* container: added the ability to update `database_encryption` without recreating the cluster. ([#2259](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2259)) -* container: fixed a bug where useIpAlias was not defaulting to true inside the `ip_allocation_policy` block ([#2260](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2260)) -* endpoints: fixed `google_endpoints_service` to allow dependent resources to plan based on the `config_id` value. ([#2248](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2248)) -* runtimeconfig: fixed `Requested entity was not found.` error when config was deleted outside of terraform. ([#2257](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2257)) - -## 3.29.0 (July 06, 2020) -NOTES: -* added the `https://www.googleapis.com/auth/cloud-identity` scope to the provider by default ([#2224](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2224)) -* `google_app_engine_*_version`'s `service` field is required; previously it would have passed validation but failed on apply if it were absent. 
([#6720](https://github.com/terraform-providers/terraform-provider-google/pull/6720)) - -FEATURES: -* **New Data Source:** `google_cloud_identity_group_memberships` ([#2240](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2240)) -* **New Data Source:** `google_cloud_identity_groups` ([#2240](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2240)) -* **New Resource:** `google_cloud_identity_group_membership` ([#2224](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2224)) -* **New Resource:** `google_cloud_identity_group` ([#2224](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2224)) -* **New Resource:** `google_kms_key_ring_import_job` ([#2225](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2225)) -* **New Resource:** `google_folder_iam_audit_config` ([#2237](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2237)) - -IMPROVEMENTS: -* bigquery: Added `"HOUR"` option for `google_bigquery_table` time partitioning (`type`) ([#2235](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2235)) -* compute: Added `mode` to `google_compute_region_autoscaler` `autoscaling_policy` ([#2226](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2226)) -* compute: Added `scale_down_control` to `google_compute_region_autoscaler` `autoscaling_policy` ([#2226](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2226)) -* container: added `networking_mode` to `google_container_cluster` ([#2243](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2243)) -* endpoints: enable `google_endpoints_service`-dependent resources to plan based on the `config_id` value. 
([#2248](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2248)) -* monitoring: added `request_method`, `content_type`, and `body` fields within the `http_check` object to `google_monitoring_uptime_check_config` resource ([#2233](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2233)) - -BUG FIXES: -* compute: Fixed an issue in `google_compute_managed_ssl_certificate` where multiple fully qualified domain names would cause a permadiff ([#2241](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2241)) -* compute: fixed an issue in `compute_url_map` where `path_matcher` sub-fields would conflict with `default_service` ([#2247](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2247)) -* container: fixed an issue in `google_container_cluster` where `workload_metadata_config` would cause a permadiff ([#2242](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2242)) - -## 3.28.0 (June 29, 2020) - -FEATURES: -* **New Data Source:** `google_redis_instance` ([#2209](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2209)) -* **New Resource:** `google_notebook_environment` ([#2199](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2199)) -* **New Resource:** `google_notebook_instance` ([#2199](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2199)) - -IMPROVEMENTS: -* appengine: Enabled provisioning Firestore on a new project by adding the option to specify `database_type` in `google_app_engine_application` ([#2193](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2193)) -* compute: Added `mode` to `google_compute_autoscaler` `autoscaling_policy` ([#2214](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2214)) -* compute: Added `remove_instance_state_on_destroy` to `google_compute_per_instance_config` to control deletion of 
underlying instance state. ([#2187](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2187)) -* compute: Added `remove_instance_state_on_destroy` to `google_compute_region_per_instance_config` to control deletion of underlying instance state. ([#2187](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2187)) -* compute: Added `scale_down_control` for `google_compute_autoscaler` `autoscaling_policy` ([#2214](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2214)) -* compute: Added SHARED_LOADBALANCER_VIP as an option for `google_compute_address.purpose` ([#2204](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2204)) -* dns: enabled `google_dns_policy` to accept network id ([#2189](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2189)) - -BUG FIXES: -* appengine: Added polling to `google_app_engine_firewall_rule` to prevent issues with eventually consistent creation ([#2197](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2197)) -* compute: Allowed updating `google_compute_network_peering_routes_config` `import_custom_routes` and `export_custom_routes` to false ([#2190](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2190)) -* netblock: fixed the google netblock ranges returned by the `google_netblock_ip_ranges` by targeting json on gstatic domain instead of reading SPF dns records (solution provided by network team) ([#2210](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2210)) - -## 3.27.0 (June 23, 2020) - -IMPROVEMENTS: -* accesscontextmanager: Added `custom` config to `google_access_context_manager_access_level` ([#2180](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2180)) -* cloudbuild: Added `invert_regex` flag in Github PullRequestFilter and PushFilter in triggerTemplate 
([#2171](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2171)) -* cloudrun: Added `template.spec.timeout_seconds` to `google_cloud_run_service` ([#2164](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2164)) -* compute: Added `cpu_over_commit_type` to `google_compute_node_template` ([#2176](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2176)) -* compute: Added `min_node_cpus` to the `scheduling` blocks on `compute_instance` and `compute_instance_template` ([#2169](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2169)) -* compute: Added `export_subnet_routes_with_public_ip` and `import_subnet_routes_with_public_ip` to `google_compute_network_peering` ([#2170](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2170)) -* compute: Added `remove_instance_state_on_destroy` to `google_compute_per_instance_config` to control deletion of underlying instance state. 
([#2187](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2187)) -* container: Added support for `private_cluster_config.master_global_access_config` to `google_container_cluster` ([#2157](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2157)) -* compute: Added support for `google_compute_instance_group` `instances` to accept instance id field as well as self_link ([#2161](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2161)) -* dns: Added support for `google_dns_policy` network to accept `google_compute_network.id` ([#2189](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2189)) -* redis: Added validation for name attribute in `redis_instance` ([#2167](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2167)) - -BUG FIXES: -* bigquery: Fixed `range_partitioning.range.start` so that the value `0` is sent in `google_bigquery_table` ([#2153](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2153)) -* container: Fixed a regression in `google_container_cluster` where the location was not inferred when using a `subnetwork` shortname value like `name` ([#2160](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2160)) -* datastore: Added retries to `google_datastore_index` requests when under contention. ([#2154](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2154)) -* kms: Fixed the `id` value in the `google_kms_crypto_key_version` datasource to include a `/v1` part following `//cloudkms.googleapis.com/`, making it useful for interpolation into Binary Authorization. 
([#2165](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2165)) - - -## 3.26.0 (June 15, 2020) - -FEATURES: -* **New Resource:** `google_data_catalog_tag` ([#2144](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2144)) -* **New Resource:** `google_bigquery_dataset_iam_binding` ([#2147](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2147)) -* **New Resource:** `google_bigquery_dataset_iam_member` ([#2147](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2147)) -* **New Resource:** `google_bigquery_dataset_iam_policy` ([#2147](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2147)) -* **New Resource:** `google_memcache_instance` ([#2142](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2142)) -* **New Resource:** `google_network_management_connectivity_test` ([#2138](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2138)) - -IMPROVEMENTS: -* compute: added `default_route_action` to `compute_url_map` and `compute_url_map.path_matchers` ([#2143](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2143)) -* container: Added cluster_telemetry attribute to `google_container_cluster` ([#2149](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2149)) -* dialogflow: Changed `google_dialogflow_agent.time_zone` to be updatable ([#2133](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2133)) -* dns: enabled google_dns_managed_zone to accept network id for two attributes ([#2139](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2139)) -* healthcare: Added support for `streaming_configs` to `google_healthcare_fhir_store` ([#2145](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2145)) -* monitoring: added `matcher` attribute to `content_matchers` block for 
`google_monitoring_uptime_check_config` ([#2150](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2150)) - -BUG FIXES: -* compute: fixed issue where trying to update the region of `google_compute_subnetwork` would fail instead of destroying/recreating the subnetwork ([#2134](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2134)) -* dataflow: added retries in `google_dataflow_job` for common retryable API errors when waiting for job to update ([#2146](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2146)) -* dataflow: changed the update logic for `google_dataflow_job` to wait for the replacement job to start successfully before modifying the resource ID to point to the replacement job ([#2140](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2140)) - -## 3.25.0 (June 08, 2020) -BREAKING CHANGES: -* bigquery: Add ability to manage credentials to `google_bigquery_connection`. This field is required as the resource is not useful without them. 
([#2111](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2111)) - -FEATURES: -* **New Resource:** `google_data_catalog_tag_template` ([#2120](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2120)) -* **New Resource:** `google_container_analysis_occurrence` ([#2114](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2114)) - -IMPROVEMENTS: -* appengine: added `inbound_services` to `StandardAppVersion` resource ([#2131](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2131)) -* bigquery: Added support for `google_bigquery_table` `hive_partitioning_options` ([#2121](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2121)) -* container_analysis: Added top-level generic note fields to `google_container_analysis_note` ([#2114](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2114)) - -BUG FIXES: -* bigquery: Fixed an issue where `google_bigquery_job` would return "was present, but now absent" error after job creation ([#2122](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2122)) -* container: Changed retry logic for `google_container_node_pool` deletion to use timeouts and retry errors more specifically when cluster is updating.
([#2115](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2115)) -* dataflow: fixed an issue where `google_dataflow_job` would try to update `max_workers` ([#2110](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2110)) -* dataflow: fixed an issue where updating `on_delete` in `google_dataflow_job` would cause the job to be replaced ([#2110](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2110)) -* compute: fixed issue where removing all target pools from `google_compute_instance_group_manager` or `google_compute_region_instance_group_manager` had no effect ([#2124](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2124)) -* functions: Added retry to `google_cloudfunctions_function` creation when API returns error while pulling source from GCS ([#2116](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2116)) -* provider: Removed credentials from output error when provider cannot parse given credentials ([#2113](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2113)) - -## 3.24.0 (June 01, 2020) - -BREAKING CHANGES: -* bigquery: Add ability to manage credentials to `google_bigquery_connection`. This field is required as the resource is not useful without them. 
([#2111](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2111)) - -FEATURES: -* **New Resource:** `google_compute_machine_image` ([#2109](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2109)) -* **New Resources:** `google_data_catalog_entry_group_iam_*` ([#2098](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2098)) -* **New Resource:** `google_data_catalog_entry_group` ([#2098](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2098)) -* **New Resource:** `google_data_catalog_entry` ([#2100](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2100)) - -IMPROVEMENTS: -* appengine: added `handlers` to `google_flexible_app_version` ([#2105](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2105)) -* bigquery: suppressed diffs between fully qualified URLs and relative paths that reference the same table or dataset in `google_bigquery_job` ([#2107](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2107)) -* container: Added update support for `node_config.workload_metadata_config` to `google_container_node_pool` ([#2091](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2091)) - -BUG FIXES: -* appengine: added ability to fully sync `StandardAppVersion` resources ([#2096](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2096)) -* bigquery: Fixed an issue with `google_bigquery_dataset_access` failing for primitive role `roles/bigquery.dataViewer` ([#2092](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2092)) -* dataflow: fixed an issue where `google_dataflow_job` would try to update `max_workers` ([#2110](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2110)) -* dataflow: fixed an issue where updating `on_delete` in `google_dataflow_job` would cause the job to be replaced 
([#2110](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2110)) -* os_login: Fixed `google_os_login_ssh_public_key` `key` field attempting to update in-place ([#2094](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2094)) - -## 3.23.0 (May 25, 2020) - -BREAKING CHANGES: -* The base url for the `monitoring` endpoint no longer includes the API version (previously "v3/"). If you use a `monitoring_custom_endpoint`, remove the trailing "v3/". ([#2088](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2088)) - -FEATURES: -* **New Data Source:** `google_iam_testable_permissions` ([#2071](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2071)) -* **New Resource:** `google_monitoring_dashboard` ([#2088](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2088)) - -IMPROVEMENTS: -* bigquery: Added ability for various `table_id` fields (and one `dataset_id` field) in `google_bigquery_job` to specify a relative path instead of just the table id ([#2079](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2079)) -* composer: Added support for `google_composer_environment` `config.private_environment_config.cloud_sql_ipv4_cidr_block` ([#2075](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2075)) -* composer: Added support for `google_composer_environment` `config.private_environment_config.web_server_ipv4_cidr_block` ([#2075](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2075)) -* composer: Added support for `google_composer_environment` `web_server_network_access_control` for private environments ([#2075](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2075)) -* container: Added update support for `node_config.workload_metadata_config` to `google_container_node_pool` 
([#2091](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2091)) -* container: Added `min_cpu_platform` to google_container_cluster.cluster_autoscaling.auto_provisioning_defaults ([#2086](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2086)) -* container: Added `release_channel_default_version` to `data.google_container_engine_versions`, allowing you to find the default version for a release channel ([#2068](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2068)) -* container: Added the ability to unspecify `google_container_cluster`'s `min_master_version` field ([#2068](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2068)) -* container: Added update support to `google_container_cluster`'s `release_channel` field ([#2068](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2068)) -* container: Added `config_connector_config` to `google_container_cluster` ([#2064](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2064)) -* monitoring: Added window-based SLI to `google_monitoring_slo` ([#2070](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2070)) - -BUG FIXES: -* compute: Fixed an issue where `google_compute_route` creation failed while VPC peering was in progress.
([#2082](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2082)) -* Fixed an issue where data source `google_organization` would ignore exact domain matches if multiple domains were found ([#2085](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2085)) -* compute: Fixed `google_compute_interconnect_attachment` `edge_availability_domain` diff when the field is unspecified ([#2084](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2084)) -* compute: Fixed error where plan would error if `google_compute_region_disk_resource_policy_attachment` had been deleted outside of terraform. ([#2065](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2065)) -* compute: Raise limit on number of `src_ip_ranges` values in `google_compute_security_policy` to supported 10 ([#2076](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2076)) -* iam: Fixed an issue where `google_service_account` shows an error after creating the resource ([#2074](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2074)) - -## 3.22.0 (May 18, 2020) -BREAKING CHANGE: -* `google_bigtable_instance` resources now cannot be destroyed unless `deletion_protection = false` is set in state for the resource. ([#2061](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2061)) - -FEATURES: -* **New Resource:** `google_compute_region_per_instance_config` ([#2046](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2046)) -* **New Resource:** `google_dialogflow_entity_type` ([#2052](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2052)) - -IMPROVEMENTS: -* bigtable: added `deletion_protection` field to `google_bigtable_instance` to make deleting them require an explicit intent. 
([#2061](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2061)) -* compute: Added `google_compute_region_backend_service` `portName` parameter ([#2048](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2048)) -* dataproc: Updated `google_dataproc_cluster.software_config.optional_components` to include new options. ([#2049](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2049)) -* monitoring: Added `request_based` SLI support to `google_monitoring_slo` ([#2058](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2058)) -* storage: added `google_storage_bucket` bucket name to the error message when the bucket can't be deleted because it's not empty ([#2059](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2059)) - -BUG FIXES: -* bigquery: Fixed error where `google_bigquery_dataset_access` resources could not be found post-creation if role was set to a predefined IAM role with an equivalent primitive role (e.g. `roles/bigquery.dataOwner` and `OWNER`) ([#2039](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2039)) -* compute: Fixed permadiff in `google_compute_instance_template`'s `network_tier`. ([#2054](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2054)) -* compute: Removed permadiff or errors on update for `google_compute_backend_service` and `google_compute_region_backend_service` when `consistent_hash` values were previously set on backend service but are not supported by updated value of `locality_lb_policy` ([#2044](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2044)) -* sql: Fixed occasional failure to delete `google_sql_database_instance` and `google_sql_user`.
([#2045](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2045)) - -## 3.21.0 (May 11, 2020) - -FEATURES: -* **New Resource:** `google_compute_per_instance_config` ([#2029](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2029)) -* **New Resource:** `google_logging_billing_account_bucket_config` ([#2008](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2008)) -* **New Resource:** `google_logging_folder_bucket_config` ([#2008](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2008)) -* **New Resource:** `google_logging_organization_bucket_config` ([#2008](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2008)) -* **New Resource:** `google_logging_project_bucket_config` ([#2008](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2008)) - -IMPROVEMENTS: -* all: add configurable timeouts to several resources that did not previously have them ([#2007](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2007)) -* bigquery: added `service_account_name` field to `google_bigquery_data_transfer_config` resource ([#2004](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2004)) -* cloudfunctions: Added validation to label keys for `google_cloudfunctions_function` as API errors aren't useful. ([#2009](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2009)) -* compute: Added support for `stateful_disk` to both `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager`. 
([#2006](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2006)) -* container: added `kalm_config` addon to `google_container_cluster` ([#2027](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2027)) -* dataflow: Added drift detection for `google_dataflow_job` `template_gcs_path` and `temp_gcs_location` fields ([#2021](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2021)) -* dataflow: Added support for update-by-replacement to `google_dataflow_job` ([#2021](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2021)) -* dataflow: added `additional_experiments` field to `google_dataflow_job` ([#2005](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2005)) -* dataproc: added component gateway support to `google_dataproc_cluster` ([#2035](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2035)) -* storage: Added retries for `google_storage_bucket_iam_*` on 412 (precondition not met) errors for eventually consistent bucket creation. ([#2011](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2011)) - -BUG FIXES: -* all: fixed bug where timeouts specified in units other than minutes were getting incorrectly rounded. Also fixed several instances of timeout values being used from the wrong method. 
([#2002](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2002)) -* accesscontextmanager: Fixed setting `require_screen_lock` to true for `google_access_context_manager_access_level` ([#2010](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2010)) -* appengine: Changed `google_app_engine_application` to respect updates in `iap` ([#2000](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2000)) -* bigquery: Fixed error where `google_bigquery_dataset_access` resources could not be found post-creation if role was set to a predefined IAM role with an equivalent primitive role (e.g. `roles/bigquery.dataOwner` and `OWNER`) ([#2039](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2039)) -* bigquery: Fixed the `google_sheets_options` at least one of logic. ([#2030](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2030)) -* cloudscheduler: Fixed permadiff for `google_cloud_scheduler_job.retry_config.*` block when API provides default values ([#2028](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2028)) -* compute: Added lock to prevent `google_compute_route` from changing while peering operations are happening on its network ([#2016](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2016)) -* compute: Stopped force-recreation of `google_compute_backend_service` and `google_compute_region_backend_service` on updating `locality_lb_policy` ([#2012](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2012)) -* compute: fixed issue where the default value for the attribute `advertise_mode` on `google_compute_router_peer` was not populated on import ([#2024](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2024)) -* container: Fixed occasional error with `container_node_pool` partially-successful creations not being recorded if an error occurs on the GCP side.
([#2038](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2038)) -* container: fixed issue where terraform would error if a gke instance group was deleted out-of-band ([#2015](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2015)) -* storage: Fixed setting/reading `google_storage_bucket_object` metadata on API object ([#2025](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2025)) -* storage: Marked the credentials field in `google_storage_object_signed_url` as sensitive so it doesn't expose private credentials. ([#2026](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2026)) - -## 3.20.0 (May 04, 2020) - -* **New Resource:** `google_artifact_registry_repository` ([#1981](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1981)) -* **New Resource:** `google_artifact_registry_repository_iam_policy` ([#1981](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1981)) -* **New Resource:** `google_artifact_registry_repository_iam_binding` ([#1981](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1981)) -* **New Resource:** `google_artifact_registry_repository_iam_member` ([#1981](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1981)) -* **New Resource:** `google_bigquery_connection` ([#2014](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2014)) - -IMPROVEMENTS: -* appengine: Added `automatic_scaling`, `basic_scaling`, and `manual_scaling` to `google_app_engine_standard_app_version` ([#1984](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1984)) -* bigquery: added `service_account_name` field to `google_bigquery_data_transfer_config` resource ([#2004](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2004)) -* bigtable: added ability to add/remove column families in `google_bigtable_table` 
([#1988](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1988)) -* cloudfunctions: Added validation to label keys for `google_cloudfunctions_function` as API errors aren't useful. ([#2009](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2009)) -* compute: Added support for `stateful_disk` to both `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager`. ([#2006](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2006)) -* compute: Added support for default URL redirects to `google_compute_url_map` and `google_compute_region_url_map` ([#1998](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1998)) -* dataflow: Added `additional_experiments` field to `google_dataflow_job` ([#2005](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2005)) -* dns: Added `service_directory_config` field to `google_dns_managed_zone` ([#1976](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1976)) -* compute: Added update of `google_compute_backend_service` and `google_compute_region_backend_service` field `locality_lb_policy` ([#2012](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2012)) - -BUG FIXES: -* accesscontextmanager: Fixed setting `require_screen_lock` to true for `google_access_context_manager_access_level` ([#2010](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2010)) -* appengine: Changed `google_app_engine_application` to respect updates in `iap` ([#2000](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2000)) -* storage: Added retries for `google_storage_bucket_iam_*` on 412 (precondition not met) errors for eventually consistent bucket creation.
([#2011](https://github.com/terraform-providers/terraform-provider-google-beta/pull/2011)) - -## 3.19.0 (April 27, 2020) - -FEATURES: -* **New Resource:** `google_bigquery_job` ([#1959](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1959)) -* **New Resource:** `google_monitoring_slo` ([#1953](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1953)) -* **New Resource:** `google_service_directory_endpoint` ([#1964](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1964)) -* **New Resource:** `google_service_directory_namespace` ([#1964](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1964)) -* **New Resource:** `google_service_directory_service` ([#1964](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1964)) - -IMPROVEMENTS: -* bigtable: Reduced the minimum number of nodes for the `bigtable_instance` resource from 3 to 1. ([#1968](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1968)) -* container: Added support for `google_container_cluster` Compute Engine persistent disk CSI driver ([#1969](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1969)) -* compute: Added support for `google_compute_instance` `resource_policies` field ([#1957](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1957)) -* compute: Added support for `google_compute_resource_policy` group placement policies ([#1957](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1957)) -* healthcare: Added `schema` field to `google_healthcare_hl7_v2_store` ([#1962](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1962)) - -BUG FIXES: -* dataproc: Fixed diff when `google_dataproc_cluster` `preemptible_worker_config.0.num_instances` is sized to 0 and other `preemptible_worker_config` subfields are set
([#1954](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1954)) -* resourcemanager: added a wait to `google_project` so that projects are more likely to be ready before the resource finishes creation ([#1970](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1970)) -* sql: Allowed `binary_log_enabled` to be disabled. ([#1973](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1973)) -* sql: Fixed behaviour in `google_sql_database` when the parent instance is deleted, removing it from state ([#1972](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1972)) - -## 3.18.0 (April 20, 2020) - -FEATURES: -* **New Data Source:** `google_firebase_web_app_config` ([#1950](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1950)) -* **New Data Source:** `google_firebase_web_app` ([#1950](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1950)) -* **New Data Source:** `google_monitoring_app_engine_service` ([#1944](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1944)) -* **New Resource:** `google_firebase_web_app` ([#1950](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1950)) -* **New Resource:** `google_monitoring_custom_service` ([#1944](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1944)) -* **New Resource:** `google_compute_global_network_endpoint` ([#1948](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1948)) -* **New Resource:** `google_compute_global_network_endpoint_group` ([#1948](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1948)) -* **New Resource:** `google_monitoring_slo` ([#1953](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1953)) - -IMPROVEMENTS: -* appengine: Added `iap.enabled` field to `google_app_engine_application` resource 
([#1943](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1943)) -* iam: Added `name` field to `google_organization_iam_custom_role` ([#1951](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1951)) -* iam: Added `name` field to `google_project_iam_custom_role` ([#1951](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1951)) - -BUG FIXES: -* container: Fixed importing/reading `google_container_node_pool` resources in non-RUNNING states ([#1952](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1952)) -* container: Made `addons_config.cloudrun_config` able to be updated without recreating and destroying. ([#1942](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1942)) -* container: Made `addons_config.dns_cache_config` able to be updated without recreating and destroying. ([#1942](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1942)) -* monitoring: Made `display_name` optional on `google_monitoring_notification_channel` ([#1947](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1947)) - -## 3.17.0 (April 13, 2020) - -FEATURES: -* **New Resource:** `google_bigquery_dataset_access` ([#1924](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1924)) -* **New Resource:** `google_dialogflow_intent` ([#1936](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1936)) -* **New Resource:** `google_os_login_ssh_public_key` ([#1922](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1922)) - -IMPROVEMENTS: -* accesscontextmanager: added `spec` and `use_explicit_dry_run_spec` to `google_access_context_manager_service_perimeter` to test perimeter configurations in dry-run mode.
([#1940](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1940)) -* compute: Added update support for `google_compute_interconnect_attachment` `admin_enabled` ([#1931](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1931)) -* compute: Added field `log_config` to `google_compute_health_check` and `google_compute_region_health_check` to enable health check logging. ([#1934](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1934)) -* compute: Added more import formats for `google_compute_instance` ([#1933](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1933)) -* sourcerepo: allowed `google_sourcerepo_repo` `pubsub_configs.topic` to accept short topic names in addition to full references. ([#1938](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1938)) - -BUG FIXES: -* compute: Fixed diff on default value for `google_compute_interconnect_attachment` `admin_enabled` ([#1931](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1931)) -* compute: Fixed perma-diff on `google_compute_interconnect_attachment` `candidate_subnets` ([#1931](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1931)) -* compute: fixed bug where `google_compute_instance_from_template` instance defaults were overriding `scheduling` ([#1939](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1939)) -* iap: `project` can now be unset in `iap_web_iam_member` and will read from the default `project` ([#1935](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1935)) -* serviceusage: fixed issue where `google_project_services` attempted to read a project before enabling the API that allows that read ([#1937](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1937)) -* sql: fixed error that occurred on `google_sql_database_instance` when 
`settings.ip_configuration` was set but `ipv4_enabled` was not set to true and `private_network` was not configured, by defaulting `ipv4_enabled` to true. ([#1926](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1926)) -* storage: fixed bug where deleting a `google_storage_bucket` that contained non-deletable objects would retry indefinitely ([#1929](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1929)) - -## 3.16.0 (April 06, 2020) -FEATURES: -* **New Data Source:** `google_monitoring_uptime_check_ips` ([#1912](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1912)) -* **New Resource:** `firebase_project_location`: finalizes the firebase location. ([#1919](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1919)) - -IMPROVEMENTS: -* cloudfunctions: Added `ingress_settings` field to `google_cloudfunctions_function` ([#1898](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1898)) -* cloudfunctions: added support for `vpc_connector_egress_settings` to `google_cloudfunctions_function` ([#1904](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1904)) -* accesscontextmanager: added `status.vpc_accessible_services` to `google_access_context_manager_service_perimeter` to control which services are available from the perimeter's VPC networks to the restricted Google APIs IP address range. 
([#1910](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1910)) -* cloudrun: added ability to autogenerate revision name ([#1900](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1900)) -* compute: added ability to resize `google_compute_reservation` ([#1908](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1908)) -* container: added `enable_resource_consumption_metering` to `resource_usage_export_config` in `google_container_cluster` ([#1901](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1901)) -* dns: added ability to update `google_dns_managed_zone.dnssec_config` ([#1914](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1914)) -* pubsub: Added `dead_letter_policy` support to `google_pubsub_subscription` ([#1913](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1913)) - -BUG FIXES: -* compute: Fixed an issue where `port` could not be removed from health checks ([#1906](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1906)) -* storage: fixed an issue where `google_storage_bucket_iam_member` showed a diff for bucket self links ([#1918](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1918)) - -## 3.15.0 (March 30, 2020) - -FEATURES: -* **New Resource:** `google_compute_instance_group_named_port` ([#1869](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1869)) -* **New Resource:** `google_service_usage_consumer_quota_override` ([#1884](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1884)) -* **New Resource:** `google_firebase_project`: enables Firebase for a referenced Google project ([#1885](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1885)) -* **New Resource:** `google_iap_brand` ([#1848](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1848)) -* 
**New Resource:** `google_iap_client` ([#1848](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1848)) -* **New Resource:** `google_appengine_flexible_app_version` ([#1849](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1849)) - -IMPROVEMENTS: -* accesscontextmanager: Added `regions` field to `google_access_context_manager_access_level` ([#1882](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1882)) -* compute: added support for IAM conditions in `google_compute_subnet_iam_*` IAM resources ([#1877](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1877)) -* kms: Added new field "Additional Authenticated Data" for Cloud KMS data source `google_kms_secret` ([#1886](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1886)) -* kms: Added new field "Additional Authenticated Data" for Cloud KMS resource `google_kms_secret_ciphertext` ([#1886](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1886)) - -BUG FIXES: -* kms: Fixed an issue in `google_kms_crypto_key_version` where `public_key` would return empty after apply ([#1879](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1879)) -* logging: Fixed import issue with `google_logging_metric` in a non-default project. ([#1876](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1876)) -* provider: Fixed an error with resources failing to upload large files (e.g. 
with `google_storage_bucket_object`) during retried requests ([#1894](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1894)) - -## 3.14.0 (March 23, 2020) - -FEATURES: -* **New Data Source:** `google_compute_instance_serial_port` ([#1860](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1860)) -* **New Resource:** `google_compute_region_ssl_certificate` ([#1863](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1863)) - -IMPROVEMENTS: -* compute: Added new attribute reference `current_status` to the `google_compute_instance` resource ([#1857](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1857)) -* container: Added `dns_cache_config` field to `google_container_cluster` resource ([#1853](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1853)) -* container: Updated `upgrade_settings` to read defaults from API for the `google_container_node_pool` resource ([#1859](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1859)) -* provider: Added provider-wide request retries for common temporary GCP error codes and network errors ([#1856](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1856)) -* redis: Added `connect_mode` field to `google_redis_instance` resource ([#1854](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1854)) - -## 3.13.0 (March 16, 2020) - -BREAKING CHANGES: -* dialogflow: Changed `google_dialogflow_agent.time_zone` to ForceNew. Updating this field will require recreation. This is due to a change in API behavior. 
([#1827](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1827)) - -FEATURES: -* **New Resource:** `google_bigquery_reservation` ([#1833](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1833)) -* **New Resource:** `google_compute_region_disk_resource_policy_attachment` ([#1836](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1836)) -* **New Resource:** `google_sql_source_representation_instance` ([#1832](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1832)) - -IMPROVEMENTS: -* bigtable: Added support for full-name/id `instance_name` value in `google_bigtable_table` and `google_bigtable_gc_policy` ([#1830](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1830)) -* compute: Added `autoscaling_policy` to `google_compute_node_group` ([#1841](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1841)) -* compute: Added support for full-name/id `network_endpoint_group` value in `google_network_endpoint` ([#1831](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1831)) -* dialogflow: Changed `google_dialogflow_agent` to not read `tier` status ([#1829](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1829)) -* monitoring: Added `sensitive_labels` to `google_monitoring_notification_channel` so that labels like `password` and `auth_token` can be managed separately from the other labels and marked as sensitive. 
([#1844](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1844)) - -BUG FIXES: -* all: fixed issue where nested objects were getting sent as null values to GCP on create instead of being omitted from requests ([#1822](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1822)) -* cloudfunctions: fixed `vpc_connector` to be updated properly in `google_cloudfunctions_function` ([#1825](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1825)) -* compute: fixed `google_compute_security_policy` from allowing two rules with the same priority. ([#1828](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1828)) -* compute: fixed bug where `google_compute_instance.scheduling.node_affinities.operator` would incorrectly accept `NOT` rather than `NOT_IN`. ([#1835](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1835)) -* container: Fixed issue where `google_container_node_pool` resources created in the 2.X series were failing to update after 3.11. ([#1846](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1846)) - -## 3.12.0 (March 09, 2020) - -IMPROVEMENTS: -* serviceusage: `google_project_service` no longer attempts to enable a service that is already enabled. ([#1814](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1814)) -* bigtable: Added support for full-name/id `instance` value in `google_bigtable_app_profile` ([#1804](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1804)) -* pubsub: Added polling to ensure correct resource state for negative-cached PubSub resources ([#1816](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1816)) - -BUG FIXES: -* compute: Fixed a scenario where `google_compute_instance_template` would cause a crash. 
([#1812](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1812)) -* storage: Added check for bucket retention policy list being empty. ([#1807](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1807)) -* storage: Added locking for operations involving `google_storage_*_access_control` resources to prevent errors from ACLs being added at the same time. ([#1806](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1806)) -* container: Fixed panic when upgrading `google_container_cluster` with autoscaling block. ([#1766](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1766)) - -## 3.11.0 (March 02, 2020) - -FEATURES: -* **New Data Source:** `google_compute_backend_bucket` ([#1778](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1778)) -* **New Resource:** `google_app_engine_service_split_traffic` ([#1785](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1785)) -* **New Resource:** `google_compute_packet_mirroring` ([#1791](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1791)) -* **New Resource:** Added new resource `google_game_services_game_server_cluster` ([#1789](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1789)) -* **New Resource:** Added new resource `google_game_services_game_server_config` ([#1789](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1789)) -* **New Resource:** Added new resource `google_game_services_game_server_deployment_rollout` ([#1789](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1789)) -* **New Resource:** Added new resource `google_game_services_game_server_deployment` ([#1789](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1789)) -* **New Resource:** Added new resource `google_game_services_realm` 
([#1789](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1789)) - -IMPROVEMENTS: -* bigquery: Landed support for range-based partitioning in `google_bigquery_table` ([#1782](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1782)) -* compute: added check on `google_compute_router` for non-empty advertised_groups or advertised_ip_ranges values when advertise_mode is DEFAULT in the bgp block. ([#1776](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1776)) -* compute: added the ability to manage the status of `google_compute_instance` resources with the `desired_status` field ([#1786](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1786)) -* iam: `google_project_iam_member` and `google_project_iam_binding`'s `project` field can be specified with an optional `projects/` prefix ([#1780](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1780)) -* storage: added `metadata` to `google_storage_bucket_object`. ([#1779](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1779)) - -BUG FIXES: -* compute: Updated `google_project` to check for valid permissions on the parent billing account before creating and tainting the resource. 
([#1777](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1777)) -* container: Fixed panic when upgrading `google_container_cluster` with `autoscaling` block ([#1766](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1766)) - -## 3.10.0 (February 25, 2020) - -BREAKING CHANGES: -* container: Fully removed `use_ip_aliases` and `create_subnetwork` fields to fix misleading diff for removed fields ([#1760](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1760)) - -FEATURES: -* **New Data Source:** `google_dns_keys` ([#1768](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1768)) -* **New Resource:** `google_datastore_index` ([#1755](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1755)) -* **New Resource:** `google_storage_hmac_key` ([#1765](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1765)) -* **New Resource:** `google_endpoints_service_iam_binding` ([#1761](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1761)) -* **New Resource:** `google_endpoints_service_iam_member` ([#1761](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1761)) -* **New Resource:** `google_endpoints_service_iam_policy` ([#1761](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1761)) - -IMPROVEMENTS: -* container: Enabled configuring autoscaling profile in GKE clusters (https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles) ([#1756](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1756)) -* container: Allowed import/update/deletion of `google_container_cluster` in error states ([#1759](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1759)) -* container: Changed `google_container_node_pool` so node pools created in an error state will be marked as tainted on creation. 
([#1758](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1758)) -* container: Allowed import/update/deletion of `google_container_node_pool` in error states and updated resource to wait for a stable state after any changes. ([#1758](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1758)) -* container: added label_fingerprint to `google_container_cluster` ([#1750](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1750)) -* dataflow: added `job_id` field to `google_dataflow_job` ([#1754](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1754)) -* dataflow: added computed `type` field to `google_dataflow_job`. ([#1771](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1771)) -* healthcare: added `version` field to `google_healthcare_fhir_store` ([#1769](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1769)) -* provider: Added retries for common network errors we've encountered. ([#1762](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1762)) - -## 3.9.0 (February 18, 2020) - -FEATURES: -* **New Resource:** `google_container_registry` ([#1725](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1725)) - -IMPROVEMENTS: -* all: improve error handling of 404s. ([#1728](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1728)) -* bigtable: added update support for `display_name` and `instance_type` ([#1751](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1751)) -* container: `google_container_cluster` will wait for a stable state after updates. 
([#1737](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1737)) -* container: added support for `autoscaling_profile` to `google_container_cluster` ([#1756](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1756)) -* container: added `boot_disk_kms_key` to `node_config` block. ([#1736](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1736)) -* dataflow: added `job_id` field to `google_dataflow_job` ([#1754](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1754)) -* dialogflow: improve error handling by increasing retry count ([#1730](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1730)) -* resourcemanager: fixed retry behavior for updates in `google_project`, added retries for billing metadata requests ([#1735](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1735)) -* sql: add `encryption_key_name` to `google_sql_database_instance` ([#1724](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1724)) - -BUG FIXES: -* cloudrun: fixed permadiff caused by new API default values on `annotations` and `limits` ([#1727](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1727)) -* container: Removed restriction on `auto_provisioning_defaults` to allow both `oauth_scopes` and `service_account` to be set ([#1748](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1748)) -* firestore: fixed import of `google_firestore_index` when database or collection were non-default. ([#1741](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1741)) -* iam: Fixed an erroneous error during import of IAM resources when a provider default project/zone/region is not defined. 
([#1734](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1734)) -* kms: Fixed issue where `google_kms_crypto_key_version` datasource would throw an Invalid Index error on plan ([#1740](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1740)) - -## 3.8.0 (February 10, 2020) -NOTES: -* provider: added documentation for the `id` field for many resources, including format ([#1697](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1697)) -BREAKING CHANGES: -* compute: Added conditional requirement of `google_compute_**region**_backend_service` `backend.capacity_scaler` to no longer accept the API default if not INTERNAL. Non-INTERNAL backend services must now specify `capacity_scaler` explicitly and have a total capacity greater than 0. In addition, API default of 1.0 must now be explicitly set and will be treated as nil or zero if not set in config. ([#1707](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1707)) - -FEATURES: -* **New Data Source:** `secret_manager_secret_version` ([#1708](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1708)) -* **New Resource:** `google_access_context_manager_service_perimeter_resource` ([#1712](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1712)) -* **New Resource:** `secret_manager_secret_version` ([#1708](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1708)) -* **New Resource:** `secret_manager_secret` ([#1708](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1708)) -* **New Resource:** `google_dialogflow_agent` ([#1706](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1706)) - -IMPROVEMENTS: -* appengine: added support for `google_app_engine_application.iap` ([#1703](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1703)) -* compute: `google_compute_security_policy` 
`rule.match.expr` field is now GA ([#1692](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1692)) -* compute: added additional validation to `google_cloud_router`'s `bgp.asn` field. ([#1699](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1699)) - -BUG FIXES: -* bigtable: fixed diff for DEVELOPMENT instances that are returned from the API with one node ([#1704](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1704)) -* compute: Fixed `backend.capacity_scaler` to actually set zero (0.0) value. ([#1707](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1707)) -* compute: Fixed `google_compute_**region**_backend_service` so it no longer has a permadiff if `backend.capacity_scaler` is unset in config by requiring capacity scaler. ([#1707](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1707)) -* compute: updated `google_compute_project_metadata_item` to fail on create if its key is already present in the project metadata. ([#1714](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1714)) -* logging: updated `bigquery_options` so the default value from the api will be set in state. ([#1694](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1694)) -* sql: undeprecated `settings.ip_configuration.authorized_networks.expiration_time` ([#1691](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1691)) - -## 3.7.0 (February 03, 2020) - -IMPROVEMENTS: -* binaryauthorization: moved from beta API to ga API in anticipation of beta API turndown. ([#1689](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1689)) -* dns: `google_dns_managed_zone` added support for Non-RFC1918 fields for reverse lookup and forwarding paths. 
([#1685](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1685)) -* monitoring: Added `labels` and `user_labels` filters to data source `google_monitoring_notification_channel` ([#1666](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1666)) - -BUG FIXES: -* bigtable: fixed diff for DEVELOPMENT instances that are returned from the API with one node ([#1704](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1704)) -* compute: `google_compute_instance_template` added plan time check for any disks marked `boot` outside of the first disk ([#1684](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1684)) -* container: Fixed perma-diff in `google_container_cluster`'s `cluster_autoscaling.auto_provisioning_defaults`. ([#1679](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1679)) -* logging: updated `bigquery_options` so the default value from the api will be set in state. ([#1694](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1694)) -* storage: Stopped `project-owner` showing up in the diff for `google_storage_bucket_acl` ([#1674](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1674)) - -## 3.6.0 (January 29, 2020) - -KNOWN ISSUES: - -* bigtable: due to API changes, bigtable DEVELOPMENT instances may show a diff on `num_nodes`. There will be a fix in the 3.7.0 release of the provider. No known workarounds exist at the moment, but will be tracked in https://github.com/terraform-providers/terraform-provider-google/issues/5492. 
- -FEATURES: -* **New Data Source:** google_monitoring_notification_channel ([#1643](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1643)) -* **New Resource:** google_compute_network_peering_routes_config ([#1652](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1652)) - -IMPROVEMENTS: -* compute: added waiting logic to `google_compute_interconnect_attachment` to avoid modifications when the attachment is UNPROVISIONED ([#1664](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1664)) -* compute: made the `google_compute_network_peering` routes fields available in GA ([#1650](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1650)) -* datafusion: Added `service_account` field to `google_data_fusion_instance` ([#1660](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1660)) -* iap: added support for IAM conditions in `google_iap_tunnel_instance_iam_*` IAM resources ([#1654](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1654)) -* resourcemanager: restricted the length of the `description` field of `google_service_account`. It is now limited to 256 characters. ([#1646](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1646)) -* scheduler: Added `attempt_deadline` to `google_cloud_scheduler_job`. 
([#1639](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1639)) -* storage: added `default_event_based_hold` to `google_storage_bucket` ([#1626](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1626)) - -BUG FIXES: -* compute: Fixed `google_compute_instance_from_template` with existing boot disks ([#1655](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1655)) -* compute: Fixed a bug in `google_compute_instance` when attempting to update a field that requires stopping and starting an instance with an encrypted disk ([#1658](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1658)) - -## 3.5.0 (January 22, 2020) - -DEPRECATIONS: -* kms: deprecated `data.google_kms_secret_ciphertext` as there was no way to make it idempotent. Instead, use the `google_kms_secret_ciphertext` resource. ([#1586](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1586)) -* sql: deprecated first generation-only fields on `google_sql_database_instance` ([#1628](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1628)) - -FEATURES: -* **New Resource:** `google_kms_secret_ciphertext` ([#1586](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1586)) - -IMPROVEMENTS: -* bigtable: added the ability to add/remove clusters from `google_bigtable_instance` ([#1589](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1589)) -* compute: added support for other resource types (like a Proxy) as a `target` to `google_compute_forwarding_rule` ([#1630](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1630)) -* dataproc: added `lifecycle_config` to `google_dataproc_cluster.cluster_config` ([#1593](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1593)) -* iam: updated to allow for empty bindings in `data_source_google_iam_policy` data source 
([#1173](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1173)) -* provider: added retries for batched requests so failed batches will retry each single request separately. ([#1615](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1615)) -* resourcemanager: restricted the length of the `description` field of `google_service_account`. It is now limited to 256 characters. ([#1646](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1646)) - -BUG FIXES: -* bigtable: Fixed error on reading non-existent `google_bigtable_gc_policy`, `google_bigtable_instance`, `google_bigtable_table` ([#1597](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1597)) -* cloudfunctions: Fixed validation of `google_cloudfunctions_function` name to allow for 63 characters. ([#1640](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1640)) -* cloudtasks: Changed `max_dispatches_per_second` to a double instead of an integer. ([#1633](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1633)) -* compute: Added validation for `compute_resource_policy` to no longer allow invalid `start_time` values that weren't hourly. ([#1603](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1603)) -* compute: Fixed errors from concurrent creation/deletion of overlapping `google_compute_network_peering` resources. ([#1601](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1601)) -* compute: Stopped panic when using `usage_export_bucket` and the setting had been disabled manually. 
([#1610](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1610)) -* compute: fixed `google_compute_router_nat` timeout fields causing a diff when using a long-lived resource ([#1613](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1613)) -* compute: fixed `google_compute_target_https_proxy.quic_override` causing a diff when using a long-lived resource ([#1611](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1611)) -* identityplatform: fixed `google_identity_platform_default_supported_idp_config` to correctly allow configuration of both `idp_id` and `client_id` separately ([#1638](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1638)) -* monitoring: Stopped `labels` from causing a perma diff on `AlertPolicy` ([#1622](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1622)) - -## 3.4.0 (January 07, 2020) - -DEPRECATIONS: -* kms: deprecated `data.google_kms_secret_ciphertext` as there was no way to make it idempotent. Instead, use the `google_kms_secret_ciphertext` resource. ([#1586](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1586)) - -BREAKING CHANGES: -* `google_iap_web_iam_*`, `google_iap_web_type_compute_iam_*`, `google_iap_web_type_app_engine_*`, and `google_iap_app_engine_service_iam_*` resources now support IAM Conditions (beta provider only). If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1527](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1527)) -* `google_kms_key_ring_iam_*` and `google_kms_crypto_key_iam_*` resources now support IAM Conditions (beta provider only). 
If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1524](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1524)) -* cloudrun: Changed `google_cloud_run_domain_mapping` to correctly match Cloud Run API expected format for `spec.route_name`, {serviceName}, instead of invalid projects/{project}/global/services/{serviceName} ([#1563](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1563)) -* compute: Added back ConflictsWith restrictions for ExactlyOneOf restrictions that were removed in v3.3.0 for `google_compute_firewall`, `google_compute_health_check`, and `google_compute_region_health_check`. This effectively changes an API-side failure that was only accessible in v3.3.0 to a plan-time one. ([#1534](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1534)) -* logging: Changed `google_logging_metric.metric_descriptors.labels` from a list to a set ([#1559](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1559)) -* resourcemanager: Added back ConflictsWith restrictions for ExactlyOneOf restrictions that were removed in v3.3.0 for `google_organization_policy`, `google_folder_organization_policy`, and `google_project_organization_policy`. This effectively changes an API-side failure that was only accessible in v3.3.0 to a plan-time one. 
([#1534](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1534)) - -FEATURES: -* **New Data Source:** `google_sql_ca_certs` ([#1580](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1580)) -* **New Resource:** `google_identity_platform_default_supported_idp_config` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_identity_platform_inbound_saml_config` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_identity_platform_oauth_idp_config` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_identity_platform_tenant_default_supported_idp_config` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_identity_platform_tenant_inbound_saml_config` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_identity_platform_tenant_oauth_idp_config` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_identity_platform_tenant` ([#1523](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1523)) -* **New Resource:** `google_kms_crypto_key_iam_policy` ([#1554](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1554)) -* **New Resource:** `google_kms_secret_ciphertext` ([#1586](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1586)) - -IMPROVEMENTS: -* composer: Increased default timeouts for `google_composer_environment` ([#1539](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1539)) -* compute: Added graceful termination to `container_cluster` create calls so that partially created clusters will resume the original operation if the 
Terraform process is killed mid create. ([#1533](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1533)) -* compute: Fixed `google_compute_disk_resource_policy_attachment` parsing of region from zone to allow for provider-level zone and make error message more accurate ([#1557](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1557)) -* datafusion: Increased default timeouts for `google_data_fusion_instance` ([#1545](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1545)) -* datafusion: Increased update timeout for updating `google_data_fusion_instance` ([#1538](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1538)) -* healthcare: Enabled request batching for (beta-only) Healthcare API IAM resources `google_healthcare_*_iam_*` to reduce likelihood of errors from very low default write quota. ([#1558](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1558)) -* iap: added support for IAM Conditions to the `google_iap_web_iam_*`, `google_iap_web_type_compute_iam_*`, `google_iap_web_type_app_engine_*`, and `google_iap_app_engine_service_iam_*` resources (beta provider only) ([#1527](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1527)) -* kms: added support for IAM Conditions to the `google_kms_key_ring_iam_*` and `google_kms_crypto_key_iam_*` resources (beta provider only) ([#1524](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1524)) -* provider: Reduced default `send_after` controlling the time interval after which a batched request sends. 
([#1565](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1565)) - -BUG FIXES: -* all: fixed issue where many fields that were removed in 3.0.0 would show a diff when they were removed from config ([#1585](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1585)) -* bigquery: fixed `bigquery_table.encryption_configuration` to correctly recreate the table when modified ([#1591](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1591)) -* cloudrun: Changed `google_cloud_run_domain_mapping` to correctly match Cloud Run API expected format for `spec.route_name`, {serviceName}, instead of invalid projects/{project}/global/services/{serviceName} ([#1563](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1563)) -* cloudrun: Changed `cloud_run_domain_mapping` to poll for success or failure and throw an appropriate error when ready status returns as false. ([#1564](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1564)) -* cloudrun: Fixed `google_cloudrun_service` to allow update instead of force-recreation for changes in `spec` `env` and `command` fields ([#1566](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1566)) -* cloudrun: Removed unsupported update for `google_cloud_run_domain_mapping` to allow force-recreation. ([#1556](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1556)) -* cloudrun: Stopped returning an error when a `cloud_run_domain_mapping` was waiting on DNS verification. ([#1587](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1587)) -* compute: Fixed `google_compute_backend_service` to allow updating `cdn_policy.cache_key_policy.*` fields to false or empty. 
([#1569](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1569)) -* compute: Fixed behaviour where `google_compute_subnetwork` did not record a value for `name` when `self_link` was specified. ([#1579](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1579)) -* container: fixed issue where an empty variable in `tags` would cause a crash ([#1543](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1543)) -* endpoints: Added operation wait for `google_endpoints_service` to fix 403 "Service not found" errors during initial creation ([#1560](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1560)) -* logging: Made `google_logging_metric.metric_descriptors.labels` a set to prevent diff from ordering ([#1559](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1559)) -* resourcemanager: added retries for `data.google_organization` ([#1553](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1553)) -* vpcaccess: marked `network` field as required in order to fail invalid configs at plan-time instead of at apply-time ([#1577](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1577)) - -## 3.3.0 (December 17, 2019) - -BREAKING CHANGES: -* `google_storage_bucket_iam_*` resources now support IAM Conditions (beta provider only). If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. 
([#1479](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1479)) - -FEATURES: -* **New Resource:** `google_compute_region_health_check` is now available in GA ([#1507](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1507)) -* **New Resource:** `google_deployment_manager_deployment` ([#1498](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1498)) - -IMPROVEMENTS: -* bigquery: added `PARQUET` as an option in `google_bigquery_table.external_data_configuration.source_format` ([#1514](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1514)) -* compute: Added `allow_global_access` for to `google_compute_forwarding_rule` resource. ([#1511](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1511)) -* compute: added support for up to 100 domains on `google_compute_managed_ssl_certificate` ([#1519](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1519)) -* dataproc: added support for `security_config` to `google_dataproc_cluster` ([#1492](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1492)) -* storage: added support for IAM Conditions to the `google_storage_bucket_iam_*` resources (beta provider only) ([#1479](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1479)) -* storage: updated `id` and `bucket` fields for `google_storage_bucket_iam_*` resources to use `b/{bucket_name}` ([#1479](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1479)) - -BUG FIXES: -* compute: Fixed an issue where interpolated values caused plan-time errors in `google_compute_router_interface`. 
([#1517](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1517)) -* compute: relaxed ExactlyOneOf restrictions on `google_compute_firewall`, `google_compute_health_check`, and `google_compute_region_health_check` to enable the use of dynamic blocks with those resources. ([#1520](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1520)) -* iam: Fixed a bug that causes badRequest errors on IAM resources due to deleted serviceAccount principals ([#1501](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1501)) -* resourcemanager: relaxed ExactlyOneOf restrictions on `google_organization_policy`, `google_folder_organization_policy`, and `google_project_organization_policy` to enable the use of dynamic blocks with those resources. ([#1520](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1520)) -* sourcerepo: Fixed a bug preventing repository IAM resources from referencing repositories with the `/` character in their name ([#1521](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1521)) -* sql: fixed bug where terraform would keep retrying to create new `google_sql_database_instance` with the name of a previously deleted instance ([#1500](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1500)) - -## 3.2.0 (December 11, 2019) - -DEPRECATIONS: -* compute: deprecated `fingerprint` field in `google_compute_subnetwork`. Its value is now always `""`. 
([#1482](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1482)) - -FEATURES: -* **New Data Source:** `data_source_google_bigquery_default_service_account` ([#1471](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1471)) -* **New Resource:** cloudrun: Added support for `google_cloud_run_service` IAM resources: `google_cloud_run_service_iam_policy`, `google_cloud_run_service_iam_binding`, `google_cloud_run_service_iam_member` ([#1456](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1456)) - -IMPROVEMENTS: -* all: Added `synchronous_timeout` to provider block to allow setting higher per-operation-poll timeouts. ([#1449](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1449)) -* bigquery: Added KMS support to `google_bigquery_table` ([#1471](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1471)) -* cloudresourcemanager: Added `org_id` field to `google_organization` datasource to expose the raw organization id ([#1485](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1485)) -* cloudrun: Stopped requiring the root `metadata` block for `google_cloud_run_service`. 
([#1478](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1478)) -* compute: added support for `expr` to `google_compute_security_policy.rule.match` ([#1465](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1465)) -* compute: added support for `path_rules` to `google_compute_region_url_map` ([#1489](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1489)) -* compute: added support for `path_rules` to `google_compute_url_map` ([#1483](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1483)) -* compute: added support for `route_rules` to `google_compute_region_url_map` ([#1493](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1493)) -* compute: added support for header actions and route rules to `google_compute_url_map` ([#1435](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1435)) -* dns: Added `visibility` field to `google_dns_managed_zone` data source ([#1462](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1462)) -* sourcerepo: added support for `pubsub_configs` to `google_sourcerepo_repository` ([#1455](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1455)) - -BUG FIXES: -* dns: fixed 503s caused by high numbers of `dns_record_set`s. ([#1477](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1477)) -* logging: updated `exponential_buckets.growth_factor` from integer to double. ([#1484](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1484)) -* storage: fixed bug where users without storage.objects.list permissions couldn't delete empty buckets ([#1443](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1443)) - -## 3.1.0 (December 05, 2019) - -BREAKING CHANGES: -* compute: field `peer_ip_address` in `google_compute_router_peer` is now required, to match the API behavior. 
([#1396](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1396)) - -FEATURES: -* **New Resource:** `google_billing_budget` ([#1428](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1428)) -* **New Resource:** `google_cloud_tasks_queue` ([#1369](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1369)) -* **New Resource:** `google_organization_iam_audit_config` ([#1427](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1427)) - -IMPROVEMENTS: -* accesscontextmanager: added support for `require_admin_approval` and `require_corp_owned` in `google_access_context_manager_access_level`'s `device_policy`. ([#1403](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1403)) -* all: added retries for timeouts while fetching operations ([#1356](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1356)) -* cloudbuild: Added build timeout to `google_cloudbuild_trigger` ([#1404](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1404)) -* cloudresourcemanager: added support for importing `google_folder` in the form of the bare folder id, rather than requiring `folders/{bare_id}` ([#1430](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1430)) -* compute: Updated default timeouts on `google_compute_project_metadata_item`. 
([#1436](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1436)) -* compute: `google_compute_disk` `disk_encryption_key.raw_key` is now sensitive ([#1445](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1445)) -* compute: `google_compute_disk` `source_image_encryption_key.raw_key` is now sensitive ([#1452](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1452)) -* compute: `google_compute_network_peering` resource can now be imported ([#1439](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1439)) -* compute: computed attribute `management_type` in `google_compute_router_peer` is now available. ([#1396](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1396)) -* compute: field `network` can now be specified on `google_compute_region_backend_service`, which allows internal load balancers to target the non-primary interface of an instance. ([#1418](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1418)) -* container: Added support for `peering_name` in `google_container_cluster.private_cluster_config`. 
([#1438](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1438)) -* container: added `auto_provisioning_defaults` to `google_container_cluster.cluster_autoscaling` ([#1434](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1434)) -* container: added `upgrade_settings` support to `google_container_node_pool` ([#1400](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1400)) -* container: increased timeouts on `google_container_cluster` and `google_container_node_pool` ([#1386](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1386)) -* datafusion: Added `private_instance` and `network_config` fields to `google_data_fusion_instance` ([#1411](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1411)) -* kms: enabled use of `user_project_override` for the `kms_crypto_key` resource ([#1422](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1422)) -* kms: enabled use of `user_project_override` for the `kms_secret_ciphertext` data source ([#1433](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1433)) -* sql: added `root_password` field to `google_sql_database_instance` resource ([#1432](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1432)) - -BUG FIXES: -* bigquery: fixed an issue where bigquery table id formats from the `2.X` series caused an error at plan time ([#1448](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1448)) -* cloudbuild: Fixed incorrect dependency between `trigger_template` and `github` in `google_cloud_build_trigger`. ([#1410](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1410)) -* cloudfunctions: Fixed inability to set `google_cloud_functions_function` update timeout. 
([#1447](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1447)) -* cloudrun: Wait for the cloudrun resource to reach a ready state before returning success. ([#1409](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1409)) -* compute: `google_compute_disk` `disk_encryption_key.raw_key` is now sensitive ([#1453](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1453)) -* compute: `self_link` in several datasources will now error on invalid values instead of crashing ([#1373](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1373)) -* compute: field `advertised_ip_ranges` in `google_compute_router_peer` can now be updated without recreating the resource. ([#1396](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1396)) -* compute: marked `min_cpu_platform` on `google_compute_instance` as computed so if it is not specified it will not cause diffs ([#1429](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1429)) -* dataproc: Changed default for `google_dataproc_autoscaling_policy` `secondary_worker_config.min_instances` from 2 to 0. ([#1408](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1408)) -* dns: Fixed issue causing `google_dns_record_set` deletion to fail when the managed zone ceased to exist before the deletion event. ([#1446](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1446)) -* iam: disallowed `deleted:` principals in IAM resources ([#1417](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1417)) -* sql: added retries to `google_sql_user` create and update to reduce flakiness ([#1399](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1399)) - -## 3.0.0 (December 04, 2019) - -NOTES: - -These are the changes between 3.0.0-beta.1 and the 3.0.0 final release. 
For changes since 2.20.0, see also the 3.0.0-beta.1 changelog entry below. - -**Please see [the 3.0.0 upgrade guide](https://www.terraform.io/docs/providers/google/guides/version_3_upgrade.html) for upgrade guidance.** - -BREAKING CHANGES: -* cloudrun: updated `cloud_run_service` to v1. Significant updates have been made to the resource including a breaking schema change. ([#1426](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1426)) - -BUG FIXES: -* compute: fixed a bug in `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` that created an artificial diff when removing a now-removed field from a config ([#1401](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1401)) -* dns: Fixed bug causing `google_dns_managed_zone` datasource to always return a 404 ([#1405](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1405)) -* service_networking: fixed "An unknown error occurred" bug when creating multiple google_service_networking_connection resources in parallel ([#1246](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1246)) - -## 3.0.0-beta.1 (November 15, 2019) - -BREAKING CHANGES: - -* access_context_manager: Made `os_type` required on block `google_access_context_manager_access_level.basic.conditions.device_policy.os_constraints`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* all: changed any id values that could not be interpolated as self_links into values that could [MM#2461](https://github.com/GoogleCloudPlatform/magic-modules/pull/2461) -* app_engine: Made `ssl_management_type` required on `google_app_engine_domain_mapping.ssl_settings` [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* app_engine: Made `shell` required on `google_app_engine_standard_app_version.entrypoint`. 
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* app_engine: Made `source_url` required on `google_app_engine_standard_app_version.deployment.files` and `google_app_engine_standard_app_version.deployment.zip`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* app_engine: Made `split_health_checks ` required on `google_app_engine_application.feature_settings` [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* app_engine: Made `script_path` required on `google_app_engine_standard_app_version.handlers.script`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* bigtable: Made `cluster_id` required on `google_bigtable_app_profile.single_cluster_routing`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* bigquery: Made at least one of `range` or `skip_leading_rows` required on `google_bigquery_table.external_data_configuration.google_sheets_options`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* bigquery: Made `role` required on `google_bigquery_dataset.access`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* bigtable: Made exactly one of `single_cluster_routing` or `multi_cluster_routing_use_any` required on `google_bigtable_app_profile`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* binary_authorization: Made `name_pattern` required on `google_binary_authorization_policy.admission_whitelist_patterns`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* binary_authorization: Made `evaluation_mode` and `enforcement_mode` required on `google_binary_authorization_policy.cluster_admission_rules`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* cloudbuild: made Cloud Build Trigger's trigger template required to match API requirements. 
[MM#2352](https://github.com/GoogleCloudPlatform/magic-modules/pull/2352) -* cloudbuild: Made `branch` required on `google_cloudbuild_trigger.github`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* cloudbuild: Made `steps` required on `google_cloudbuild_trigger.build`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* cloudbuild: Made `name` required on `google_cloudbuild_trigger.build.steps`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* cloudbuild: Made `name` and `path` required on `google_cloudbuild_trigger.build.steps.volumes`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* cloudbuild: Made exactly one of `filename` or `build` required on `google_cloudbuild_trigger`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* cloudfunctions: deprecated `nodejs6` as option for `runtime` in `function` and made it required. [MM#2499](https://github.com/GoogleCloudPlatform/magic-modules/pull/2499) -* cloudscheduler: Made exactly one of `pubsub_target`, `http_target` or `app_engine_http_target` required on `google_cloudscheduler_job`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* cloudiot: removed `event_notification_config` (singular) from `google_cloudiot_registry`. Use plural `event_notification_configs` instead. [MM#2390](https://github.com/GoogleCloudPlatform/magic-modules/pull/2390) -* cloudiot: Made `public_key_certificate` required on `google_cloudiot_registry.credentials`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* cloudscheduler: Made `service_account_email` required on `google_cloudscheduler_job.http_target.oauth_token` and `google_cloudscheduler_job.http_target.oidc_token`. 
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* composer: Made at least one of `airflow_config_overrides`, `pypi_packages`, `env_variables`, `image_version`, or `python_version` required on `google_composer_environment.config.software_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* composer: Made `use_ip_aliases` required on `google_composer_environment.config.node_config.ip_allocation_policy`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* composer: Made `enable_private_endpoint` required on `google_composer_environment.config.private_environment_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* composer: Made at least one of `enable_private_endpoint` or `master_ipv4_cidr_block` required on `google_composer_environment.config.private_environment_config` [MM#2682](https://github.com/GoogleCloudPlatform/magic-modules/pull/2682) -* composer: Made at least one of `node_count`, `node_config`, `software_config` or `private_environment_config` required on `google_composer_environment.config` [MM#2682](https://github.com/GoogleCloudPlatform/magic-modules/pull/2682) -* compute: `google_compute_backend_service`'s `backend` field now requires the `group` subfield to be set. [MM#2373](https://github.com/GoogleCloudPlatform/magic-modules/pull/2373) -* compute: permanently removed `ip_version` field from `google_compute_forwarding_rule` [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* compute: permanently removed `ipv4_range` field from `google_compute_network`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* compute: permanently removed `auto_create_routes` field from `google_compute_network_peering`. 
[MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* compute: added check to only allow `google_compute_instance_template`s with 375gb scratch disks [MM#2495](https://github.com/GoogleCloudPlatform/magic-modules/pull/2495) -* compute: made `google_compute_instance_template` fail at plan time when scratch disks do not have `disk_type` `"local-ssd"`. [MM#2282](https://github.com/GoogleCloudPlatform/magic-modules/pull/2282) -* compute: removed `enable_flow_logs` field from `google_compute_subnetwork`. This is now controlled by the presence of the `log_config` block [MM#2597](https://github.com/GoogleCloudPlatform/magic-modules/pull/2597) -* compute: Made `raw_key` required on `google_compute_snapshot.snapshot_encryption_key`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made at least one of `auto_delete`, `device_name`, `disk_encryption_key_raw`, `kms_key_self_link`, `initialize_params`, `mode` or `source` required on `google_compute_instance.boot_disk`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made at least one of `size`, `type`, `image`, or `labels` required on `google_compute_instance.boot_disk.initialize_params`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made at least one of `enable_secure_boot`, `enable_vtpm`, or `enable_integrity_monitoring` required on `google_compute_instance.shielded_instance_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made at least one of `on_host_maintenance`, `automatic_restart`, `preemptible`, or `node_affinities` required on `google_compute_instance.scheduling`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made `interface` required on `google_compute_instance.scratch_disk`. 
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made at least one of `enable_secure_boot`, `enable_vtpm`, or `enable_integrity_monitoring` required on `google_compute_instance_template.shielded_instance_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made at least one of `on_host_maintenance`, `automatic_restart`, `preemptible`, or `node_affinities` required on `google_compute_instance_template.scheduling`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made `kms_key_self_link` required on `google_compute_instance_template.disk.disk_encryption_key`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Made `range` required on `google_compute_router_peer.advertised_ip_ranges`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* compute: Removed `instance_template` for `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager`. Use `version.instance_template` instead. [MM#2595](https://github.com/GoogleCloudPlatform/magic-modules/pull/2595) -* compute: removed `update_strategy` for `google_compute_instance_group_manager`. Use `update_policy` instead. [MM#2595](https://github.com/GoogleCloudPlatform/magic-modules/pull/2595) -* compute: stopped allowing selfLink or path style references as IP addresses for `google_compute_forwarding_rule` or `google_compute_global_forwarding_rule` [MM#2620](https://github.com/GoogleCloudPlatform/magic-modules/pull/2620) -* compute: permanently removed `update_strategy` field from `google_compute_region_instance_group_manager`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* compute: Made exactly one of `http_health_check`, `https_health_check`, `http2_health_check`, `tcp_health_check` or `ssl_health_check` required on `google_compute_health_check`. 
[MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* compute: Made exactly one of `http_health_check`, `https_health_check`, `http2_health_check`, `tcp_health_check` or `ssl_health_check` required on `google_compute_region_health_check`. [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* container: permanently removed `zone` and `region` fields from data source `google_container_engine_versions`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* container: permanently removed `zone`, `region` and `additional_zones` fields from `google_container_cluster`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* container: permanently removed `zone` and `region` fields from `google_container_node_pool`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* container: set `google_container_cluster`'s `logging_service` and `monitoring_service` defaults to enable GKE Stackdriver Monitoring. [MM#2471](https://github.com/GoogleCloudPlatform/magic-modules/pull/2471) -* container: removed `kubernetes_dashboard` from `google_container_cluster.addons_config` [MM#2551](https://github.com/GoogleCloudPlatform/magic-modules/pull/2551) -* container: removed automatic suppression of GPU taints in GKE `taint` [MM#2537](https://github.com/GoogleCloudPlatform/magic-modules/pull/2537) -* container: Made `disabled` required on `google_container_cluster.addons_config.http_load_balancing`, `google_container_cluster.addons_config.horizontal_pod_autoscaling`, `google_container_cluster.addons_config.network_policy_config`, `google_container_cluster.addons_config.cloudrun_config`, and `google_container_cluster.addons_config.istio_config`. 
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made at least one of `http_load_balancing`, `horizontal_pod_autoscaling` , `network_policy_config`, `cloudrun_config`, or `istio_config` required on `google_container_cluster.addons_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made `enabled` required on `google_container_cluster.network_policy`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made `enable_private_endpoint` required on `google_container_cluster.private_cluster_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made `enabled` required on `google_container_cluster.vertical_pod_autoscaling`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made `cidr_blocks` required on `google_container_cluster.master_authorized_networks_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made at least one of `username`, `password` or `client_certificate_config` required on `google_container_cluster.master_auth`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* container: Made exactly one of `daily_maintenance_window` or `recurring_window` required on `google_container_cluster.maintenance_policy` [MM#2682](https://github.com/GoogleCloudPlatform/magic-modules/pull/2682) -* container: removed `google_container_cluster` `ip_allocation_policy.use_ip_aliases`. If it's set to true, remove it from your config. If false, remove `ip_allocation_policy` as a whole. [MM#2615](https://github.com/GoogleCloudPlatform/magic-modules/pull/2615) -* container: removed `google_container_cluster` `ip_allocation_policy.create_subnetwork`, `ip_allocation_policy.subnetwork_name`, `ip_allocation_policy.node_ipv4_cidr_block`. Define an explicit `google_compute_subnetwork` and use `subnetwork` instead. 
[MM#2615](https://github.com/GoogleCloudPlatform/magic-modules/pull/2615) -* container: Made `channel` required on `google_container_cluster.release_channel`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `staging_bucket`, `gce_cluster_config`, `master_config`, `worker_config`, `preemptible_worker_config`, `software_config`, `initialization_action` or `encryption_config` required on `google_dataproc_cluster.cluster_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `zone`, `network`, `subnetwork`, `tags`, `service_account`, `service_account_scopes`, `internal_ip_only` or `metadata` required on `google_dataproc_cluster.cluster_config.gce_cluster_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `num_instances`, `image_uri`, `machine_type`, `min_cpu_platform`, `disk_config`, or `accelerators` required on `google_dataproc_cluster.cluster_config.master_config` and `google_dataproc_cluster.cluster_config.worker_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `num_local_ssds`, `boot_disk_size_gb` or `boot_disk_type` required on `google_dataproc_cluster.cluster_config.preemptible_worker_config.disk_config`, `google_dataproc_cluster.cluster_config.master_config.disk_config` and `google_dataproc_cluster.cluster_config.worker_config.disk_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `num_instances` or `disk_config` required on `google_dataproc_cluster.cluster_config.preemptible_worker_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `image_version`, `override_properties` or `optional_components` is now required on `google_dataproc_cluster.cluster_config.software_config`. 
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made `policy_uri` required on `google_dataproc_cluster.cluster_config.autoscaling_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made `max_failures_per_hour` required on `google_dataproc_job.scheduling`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made `driver_log_levels` required on `google_dataproc_job.pyspark_config.logging_config`, `google_dataproc_job.spark_config.logging_config`, `google_dataproc_job.hadoop_config.logging_config`, `google_dataproc_job.hive_config.logging_config`, `google_dataproc_job.pig_config.logging_config`, `google_dataproc_job.sparksql_config.logging_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `main_class` or `main_jar_file_uri` required on `google_dataproc_job.spark_config` and `google_dataproc_job.hadoop_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dataproc: Made at least one of `query_file_uri` or `query_list` required on `google_dataproc_job.hive_config`, `google_dataproc_job.pig_config`, and `google_dataproc_job.sparksql_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dns: Made `networks` required on `google_dns_managed_zone.private_visibility_config`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* dns: Made `network_url` required on `google_dns_managed_zone.private_visibility_config.networks`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* iam: made `iam_audit_config` resources overwrite existing audit config on create. Previous implementations merged config with existing audit configs on create. 
[MM#2438](https://github.com/GoogleCloudPlatform/magic-modules/pull/2438) -* iam: Made exactly one of `list_policy`, `boolean_policy`, or `restore_policy` required on `google_organization_policy`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* iam: Made exactly one of `all` or `values` required on `google_organization_policy.list_policy.allow` and `google_organization_policy.list_policy.deny`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* iam: `google_project_iam_policy` can handle the `project` field in either of the following forms: `project-id` or `projects/project-id` [MM#2700](https://github.com/GoogleCloudPlatform/magic-modules/pull/2700) -* iam: Made exactly one of `allow` or `deny` required on `google_organization_policy.list_policy` [MM#2682](https://github.com/GoogleCloudPlatform/magic-modules/pull/2682) -* iam: removed the deprecated `pgp_key`, `private_key_encrypted` and `private_key_fingerprint` from `google_service_account_key` [MM#2680](https://github.com/GoogleCloudPlatform/magic-modules/pull/2680) -* monitoring: permanently removed `is_internal` and `internal_checkers` fields from `google_monitoring_uptime_check_config`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* monitoring: permanently removed `labels` field from `google_monitoring_alert_policy`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* monitoring: Made `content` required on `google_monitoring_uptime_check_config.content_matchers`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* monitoring: Made exactly one of `http_check` or `tcp_check` required on `google_monitoring_uptime_check_config`.
[MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* monitoring: Made at least one of `auth_info`, `port`, `headers`, `path`, `use_ssl`, or `mask_headers` required on `google_monitoring_uptime_check_config.http_check` [MM#2665](https://github.com/GoogleCloudPlatform/magic-modules/pull/2665) -* provider: added the `https://www.googleapis.com/auth/userinfo.email` scope to the provider by default [MM#2473](https://github.com/GoogleCloudPlatform/magic-modules/pull/2473) -* pubsub: removed ability to set a full path for `google_pubsub_subscription.name` (e.g. `projects/my-project/subscriptions/my-subscription`). `name` now must be the shortname (e.g. `my-subscription`) [MM#2561](https://github.com/GoogleCloudPlatform/magic-modules/pull/2561) -* resourcemanager: converted `google_folder_organization_policy` and `google_organization_policy` import format to use slashes instead of colons. [MM#2638](https://github.com/GoogleCloudPlatform/magic-modules/pull/2638) -* serviceusage: removed `google_project_services` [MM#2403](https://github.com/GoogleCloudPlatform/magic-modules/pull/2403) -* serviceusage: stopped accepting `bigquery-json.googleapis.com` in `google_project_service`. Specify `bigquery.googleapis.com` instead. [MM#2626](https://github.com/GoogleCloudPlatform/magic-modules/pull/2626) -* sql: Made `name` and `value` required on `google_sql_database_instance.settings.database_flags`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* sql: Made at least one of `binary_log_enabled`, `enabled`, `start_time`, and `location` required on `google_sql_database_instance.settings.backup_configuration`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* sql: Made at least one of `authorized_networks`, `ipv4_enabled`, `require_ssl`, and `private_network` required on `google_sql_database_instance.settings.ip_configuration`.
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* sql: Made at least one of `day`, `hour`, and `update_track` required on `google_sql_database_instance.settings.maintenance_window`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* sql: Made at least one of `cert`, `common_name`, `create_time`, `expiration_time`, or `sha1_fingerprint` required on `google_sql_database_instance.settings.server_ca_cert`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* sql: Made at least one of `ca_certificate`, `client_certificate`, `client_key`, `connect_retry_interval`, `dump_file_path`, `failover_target`, `master_heartbeat_period`, `password`, `ssl_cipher`, `username`, and `verify_server_certificate` required on `google_sql_database_instance.settings.replica_configuration`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* sql: Made `value` required on `google_sql_database_instance.settings.ip_configuration.authorized_networks`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* storage: permanently removed `is_live` flag from `google_storage_bucket`. [MM#2436](https://github.com/GoogleCloudPlatform/magic-modules/pull/2436) -* storage: Made at least one of `main_page_suffix` or `not_found_page` required on `google_storage_bucket.website`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* storage: Made at least one of `min_time_elapsed_since_last_modification`, `max_time_elapsed_since_last_modification`, `include_prefixes`, or `exclude_prefixes` required on `google_storage_transfer_job.transfer_spec.object_conditions`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* storage: Made at least one of `overwrite_objects_already_existing_in_sink`, `delete_objects_unique_in_sink`, and `delete_objects_from_source_after_transfer` required on `google_storage_transfer_job.transfer_spec.transfer_options`. 
[MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) -* storage: Made at least one of `gcs_data_source`, `aws_s3_data_source`, or `http_data_source` required on `google_storage_transfer_job.transfer_spec`. [MM#2608](https://github.com/GoogleCloudPlatform/magic-modules/pull/2608) - -## 2.20.3 (March 10, 2020) - -NOTES: -* `2.20.3` is a backport release, and some changes will not appear in `3.X` series releases until `3.12.0`. -To upgrade to `3.X` you will need to perform a large jump in versions, and it is _strongly_ advised that you attempt to upgrade to `3.X` instead of using this release. -* `2.20.3` is primarily a preventative fix, in anticipation of a change in API response messages adding a default value. - -BUG FIXES: -* compute: fixed error when reading `google_compute_instance_template` resources with `network_interface[*].name` set. ([#1815](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1815)) - -## 2.20.2 (February 03, 2020) - -BUG FIXES: -* bigtable: fixed diff for DEVELOPMENT instances that are returned from the API with one node ([#1704](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1704)) - -## 2.20.1 (December 13, 2019) - -**Note**: 2.20.1 is a backport release. The changes in it are unavailable in 3.0.0-beta.1 through 3.2.0.
- -BUG FIXES: -* iam: Fixed a bug that causes badRequest errors on IAM resources due to deleted serviceAccount principals ([#1501](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1501)) - -## 2.20.0 (November 13, 2019) - -BREAKING CHANGES: -* `google_compute_instance_iam_*` resources now support IAM Conditions. If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1360](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1360)) -* `google_iap_app_engine_version_iam_*` resources now support IAM Conditions. If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1352](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1352)) -* `google_iap_web_backend_service_iam_*` resources now support IAM Conditions. If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1352](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1352)) -* `google_project_iam_*` resources now support IAM Conditions. If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. 
Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1321](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1321)) -* compute: the `backend.group` field is now required for `google_compute_region_backend_service`. Configurations without this would not have worked, so this isn't considered an API break. ([#1311](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1311)) - -FEATURES: -* **New Resource:** `google_data_fusion_instance` ([#1339](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1339)) - -IMPROVEMENTS: -* bigtable: added import support to `google_bigtable_table` ([#1350](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1350)) -* compute: `load_balancing_scheme` for `google_compute_forwarding_rule` now accepts `INTERNAL_MANAGED` as a value. ([#1311](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1311)) -* compute: added support for L7 ILB to google_compute_region_backend_service. ([#1311](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1311)) -* compute: extended backend configuration options for `google_compute_region_backend_service` to include `backend.balancing_mode`, `backend.capacity_scaler`, `backend.max_connections`, `backend.max_connections_per_endpoint`, `backend.max_connections_per_instance`, `backend.max_rate`, `backend.max_rate_per_endpoint`, `backend.max_rate_per_instance`, and `backend.max_utilization` ([#1311](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1311)) -* iam: changed the `id` for many IAM resources to the reference resource long name. 
Updated `instance_name` on `google_compute_instance_iam` and `subnetwork` on `google_compute_subnetwork` to their respective long names in state ([#1360](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1360)) -* iap: added support for IAM Conditions to the `google_compute_instance_iam_*` resources ([#1360](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1360)) -* iap: added support for IAM Conditions to the `google_iap_app_engine_version_iam_*` resources ([#1352](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1352)) -* iap: added support for IAM Conditions to the `google_iap_web_backend_service_iam_*` resources ([#1352](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1352)) -* logging: added `display_name` field to `google_logging_metric` resource ([#1344](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1344)) -* monitoring: Added `validate_ssl` to `google_monitoring_uptime_check_config` ([#1243](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1243)) -* project: added batching functionality to `google_project_service` read calls, so fewer API requests are made ([#1354](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1354)) -* resourcemanager: added support for IAM Conditions to the `google_project_iam_*` resources ([#1321](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1321)) -* storage: added notification_id field to `google_storage_notification` ([#1368](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1368)) - -BUG FIXES: -* compute: fixed issue where setting a 0 for `min_replicas` in `google_compute_autoscaler` and `google_compute_region_autoscaler` would set that field to its server-side default instead of 0. 
([#1351](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1351)) -* dns: fixed crash when `network` blocks are defined without `network_url`s ([#1345](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1345)) -* google: used the correct update method for google_service_account.description ([#1362](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1362)) -* logging: fixed issue where logging exclusion resources silently failed when being mutated in parallel ([#1329](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1329)) - -## 2.19.0 (November 05, 2019) - -DEPRECATIONS: -* `compute`: deprecated `enable_flow_logs` on `google_compute_subnetwork`. The presence of the `log_config` block signals that flow logs are enabled for a subnetwork ([#1320](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1320)) -* `compute`: deprecated `instance_template` for `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` . Use `version.instance_template` instead. ([#1309](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1309)) -* `compute`: deprecated `update_strategy` for `google_compute_instance_group_manager` . Use `update_policy` instead. ([#1309](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1309)) -* `container`: deprecated `google_container_cluster` `ip_allocation_policy.create_subnetwork`, `ip_allocation_policy.subnetwork_name`, `ip_allocation_policy.node_ipv4_cidr_block`. Define an explicit `google_compute_subnetwork` and use `subnetwork` instead. ([#1312](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1312)) -* `container`: deprecated `google_container_cluster` `ip_allocation_policy.use_ip_aliases`. If it's set to true, remove it from your config. If false, remove `ip_allocation_policy` as a whole. 
([#1312](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1312)) -* `iam`: Deprecated `pgp_key` on `google_service_account_key` resource. See https://www.terraform.io/docs/extend/best-practices/sensitive-state.html for more information. ([#1326](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1326)) - -BREAKING CHANGES: -* `google_service_account_iam_*` resources now support IAM Conditions. If any conditions had been created out of band before this release, take extra care to ensure they are present in your Terraform config so the provider doesn't try to create new bindings with no conditions. Terraform will show a diff that it is adding the condition to the resource, which is safe to apply. ([#1188](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1188)) - -FEATURES: -* `compute`: added `google_compute_router` datasource ([#1233](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1233)) - -IMPROVEMENTS: -* `cloudbuild`: added ability to specify `name` for `cloud_build_trigger` to avoid name collisions when creating multiple triggers at once. ([#1277](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1277)) -* `compute`: added support for multiple versions of `instance_template` and granular control of the update policies for `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager`. ([#1309](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1309)) -* `container`: added `taint` field in GKE resources to the GA `google` provider ([#1296](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1296)) -* `container`: fix a diff created in the cloud console when `MaintenanceExclusions` are added. 
([#1310](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1310)) -* `container`: added `maintenance_policy.recurring_window` support to `google_container_cluster`, significantly increasing expressive range. ([#1292](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1292)) -* `compute`: added `google_compute_instance` support for display device (Virtual Displays) ([#1313](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1313)) -* `iam`: added support for IAM Conditions to the `google_service_account_iam_*` resources (beta provider only) ([#1188](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1188)) -* `iam`: added `description` to `google_service_account`. ([#1291](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1291)) - -BUG FIXES: -* `appengine`: Resolved permadiff in `google_app_engine_domain_mapping.ssl_settings.certificate_id`. ([#1303](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1303)) -* `storage`: Fixed error in `google_storage_bucket` where locked retention policies would cause a bucket to report failure on all updates (even though updates were applied correctly). ([#1307](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1307)) -* `container`: Fixed nil reference to ShieldedNodes. ([#1314](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1314)) - -## 2.18.1 (October 25, 2019) - -BUGS: -* `resourcemanager`: fixed deleting the default network in `google_project` ([#1299](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1299)) - -## 2.18.0 (October 23, 2019) - -KNOWN ISSUES: -* `resourcemanager`: `google_project` `auto_create_network` is failing to delete networks when set to `false`. Use an earlier provider version to resolve. 
- -DEPRECATIONS: -* `container`: The `kubernetes_dashboard` addon is deprecated for `google_container_cluster`. ([#1247](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1247)) - -FEATURES: -* **New Resource:** `google_app_engine_application_url_dispatch_rules` ([#1262](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1262)) - -IMPROVEMENTS: -* `all`: increased support for custom endpoints across the provider ([#1244](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1244)) -* `appengine`: added the ability to delete the parent service of `google_app_engine_standard_app_version` ([#1222](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1222)) -* `container`: Added `shielded_instance_config` attribute to `node_config` ([#1198](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1198)) -* `container`: Allow the configuration of release channels when creating GKE clusters. ([#1260](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1260)) -* `dataflow`: added `ip_configuration` option to `job`. 
([#1284](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1284)) -* `pubsub`: Added field `oidc_token` to `google_pubsub_subscription` ([#1265](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1265)) -* `sql`: added `location` field to `backup_configuration` block in `google_sql_database_instance` ([#1282](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1282)) - -BUGS: -* `all`: fixed the custom endpoint version used by older legacy REST clients ([#1274](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1274)) -* `bigquery`: fix issue with `google_bigquery_data_transfer_config` `params` crashing on boolean values ([#1263](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1263)) -* `cloudrun`: fixed the apiVersion sent in `google_cloud_run_domain_mapping` requests ([#1251](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1251)) -* `compute`: added support for updating multiple fields at once to `google_compute_subnetwork` ([#1269](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1269)) -* `compute`: fixed diffs in `google_compute_instance_group`'s `network` field when equivalent values were specified ([#1286](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1286)) -* `compute`: fixed issues updating `google_compute_instance_group`'s `instances` field when config/state values didn't match ([#1286](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1286)) -* `iam`: fixed bug where IAM binding wouldn't replace members if they were deleted outside of terraform. ([#1272](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1272)) -* `pubsub`: Fixed permadiff due to interaction of organization policies and `google_pubsub_topic`. 
([#1281](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1281)) - -## 2.17.0 (October 08, 2019) - -NOTES: -* An [upgrade guide](https://www.terraform.io/docs/providers/google/version_3_upgrade.html) has been started for the upcoming 3.0.0 release. ([#1220](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1220)) -* `google_project_services` users of provider versions prior to `2.17.0` should update, as past versions of the provider will not handle an upcoming rename of `bigquery-json.googleapis.com` to `bigquery.googleapis.com` well. See https://github.com/terraform-providers/terraform-provider-google/issues/4590 for details. ([#1234](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1234)) - -DEPRECATIONS: -* `google_project_services` ([#1218](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1218)) - -FEATURES: -* **New Resource:** `google_bigtable_gc_policy` ([#1213](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1213)) -* **New Resource:** `google_binary_authorization_attestor_iam_policy` ([#1166](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1166)) -* **New Resource:** `google_compute_region_ssl_certificate` ([#1183](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1183)) -* **New Resource:** `google_compute_region_target_http_proxy` ([#1183](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1183)) -* **New Resource:** `google_compute_region_target_https_proxy` ([#1183](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1183)) -* **New Resource:** `google_iap_app_engine_service_iam_*` ([#1205](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1205)) -* **New Resource:** `google_iap_app_engine_version_iam_*` ([#1205](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1205)) -* **New 
Resource:** `google_storage_bucket_access_control` ([#1177](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1177)) - -IMPROVEMENTS: -* all: made `monitoring-read` scope available. ([#1208](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1208)) -* bigquery: added support for default customer-managed encryption keys (CMEK) for BigQuery datasets. ([#1081](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1081)) -* bigtable: import support added to `google_bigtable_instance` ([#1224](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1224)) -* cloudbuild: added `github` field in `google_cloudbuild_trigger`. ([#1229](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1229)) -* container: moved `default_max_pods_per_node` to ga. ([#1235](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1235)) -* containeranalysis: moved `google_containeranalysis_note` to ga ([#1166](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1166)) -* projectservice: added mitigations for bigquery-json to bigquery rename in project service resources. ([#1234](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1234)) - -BUGS: -* cloudscheduler: Fixed permadiff for `app_engine_http_target.app_engine_routing` on `google_cloud_scheduler_job` ([#1131](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1131)) -* compute: Added ability to set `quic_override` on `google_compute_https_target_proxy` to empty. ([#1219](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1219)) -* compute: Fix bug where changes to `region_backend_service.backends.failover` was not detected. 
([#1236](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1236)) -* compute: fixed `google_compute_router_peer` to default if empty for `advertise_mode` ([#1163](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1163)) -* compute: fixed perma-diff in `google_compute_router_nat` when referencing subnetwork via `name` ([#1194](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1194)) -* container: fixed an overly-aggressive validation for `master_ipv4_cidr_block` in `google_container_cluster` ([#1211](https://github.com/terraform-providers/terraform-provider-google-beta/pull/1211)) - -## 2.16.0 (September 24, 2019) - -KNOWN ISSUES: -* Based on an upstream change, users of the `google_project_services` resource may have seen the `bigquery.googleapis.com` service added and the `bigquery-json.googleapis.com` service removed, causing a diff. This was later reverted, causing another diff. This issue is being tracked as https://github.com/terraform-providers/terraform-provider-google/issues/4590. - -FEATURES: -* **New Resource**: `google_compute_region_url_map` is now available. To support this, the `protocol` for `google_compute_region_backend_service` can now be set to `HTTP`, `HTTPS`, `HTTP2`, and `SSL`.
([#1161](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1161)) -* **New Resource**: Adds `google_runtimeconfig_config_iam_*` resources ([#1138](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1138)) -* **New Resource**: Added `google_compute_resource_policy` and `google_compute_disk_resource_policy_attachment` to manage `google_compute_disk` resource policies as fine-grained resources ([#1085](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1085)) - -ENHANCEMENTS: -* composer: Add `python_version` and ability to set `image_version` in `google_composer_environment` in the GA provider ([#1143](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1143)) -* compute: `google_compute_global_forwarding_rule` now supports `metadata_filters`. ([#1160](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1160)) -* compute: `google_compute_backend_service` now supports `locality_lb_policy`, `outlier_detection`, `consistent_hash`, and `circuit_breakers`. ([#1118](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1118)) -* compute: Add support for `guest_os_features` to resource `google_compute_image` ([#1156](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1156)) -* compute: Added `drain_nat_ips` to `google_compute_router_nat` ([#1155](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1155)) -* container: google_container_node_pool now supports node_locations to specify specific node zones. ([#1154](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1154)) -* googleapis: `google_netblock_ip_ranges` data source now has a `private-googleapis` field, for the IP addresses used for Private Google Access for services that do not support VPC Service Controls API access. 
([#1102](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1102)) -* project: `google_project_iam_*` Properly set the `project` field in state ([#1158](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1158)) - -BUG FIXES: -* cloudiot: Fixed error where `subfolder_matches` were not set in `google_cloudiot_registry` `event_notification_configs` ([#1175](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1175)) - -## 2.15.0 (September 17, 2019) - -FEATURES: -* **New Resource**: `google_iap_web_iam_binding/_member/_policy` are now available for managing IAP web IAM permissions ([#1044](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1044)) -* **New Resource**: `google_iap_web_backend_service_binding/_member/_policy` are now available for managing IAM permissions on IAP enabled backend services ([#1044](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1044)) -* **New Resource**: `google_iap_web_type_compute_iam_binding/_member/_policy` are now available for managing IAM permissions on IAP enabled compute services ([#1044](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1044)) -* **New Resource**: `google_iap_web_type_app_engine_iam_binding/_member/_policy` are now available for managing IAM permissions on IAP enabled App Engine applications ([#1044](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1044)) -* **New Resource**: Add the new resource `google_app_engine_domain_mapping` ([#1079](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1079)) -* **New Resource**: `google_cloudfunctions_function_iam_policy`, `google_cloudfunctions_function_iam_binding`, and `google_cloudfunctions_function_iam_member` ([#1121](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1121)) -* **New Resource**: `google_compute_reservation` allows you 
to reserve instance capacity in GCE. ([#1086](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1086)) -* **New Resource**: `google_compute_region_health_check` is now available. This and `google_compute_health_check` now include additional support for HTTP2 health checks. ([#1058](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1058)) - -ENHANCEMENTS: -* compute: Added full routing options to `google_compute_router_peer` ([#1104](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1104)) -* compute: add `tunnel_id` to `google_compute_vpn_tunnel` and `gateway_id` to `google_compute_vpn_gateway` ([#1106](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1106)) -* compute: `google_compute_subnetwork` now includes the `purpose` and `role` fields. ([#1051](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1051)) -* compute: add `purpose` field to `google_compute_address` ([#1115](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1115)) -* compute: add `mode` option to `google_compute_instance.boot_disk` ([#1119](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1119)) -* compute: `google_compute_firewall` does not show a diff if allowed or denied rules are specified with uppercase protocol values ([#1144](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1144)) -* compute: Add support for the `log_config` block to `compute_backend_service` (Beta only) ([#1137](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1137)) -* logging: added `metric_descriptor.unit` to `google_logging_metric` resource ([#1117](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1117)) - -BUG FIXES: -* all: More classes of generic HTTP errors are retried provider-wide. 
([#1120](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1120)) -* container: Fix error when `master_authorized_networks_config` is removed from the `google_container_cluster` configuration. ([#1133](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1133)) -* iam: Make `google_service_account_` and `google_service_account_iam_*` validation less restrictive to allow for more default service accounts ([#1109](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1109)) -* iam: set auditconfigs in state for google_\*\_iam_policy resources ([#1134](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1134)) -* logging: `google_logging_metric` `explicit` bucket option can now be set ([#1096](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1096)) -* pubsub: Add retry for Pubsub Topic creation when project is still initializing org policies ([#1094](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1094)) -* servicenetworking: remove need for provider-level project to delete connection ([#1132](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1132)) -* sql: Add more retries for operationInProgress 409 errors for `google_sql_database_instance` ([#1108](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1108)) - -MISC: -* The User-Agent header that Terraform sends has been updated to correctly report the version of Terraform being run, and has minorly changed the formatting on the Terraform string. ([#1107](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1107)) - - -## 2.14.0 (August 28, 2019) - -DEPRECATIONS: -* cloudiot: `resource_cloudiot_registry`'s `event_notification_config` field has been deprecated. 
([#1064](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1064)) - -FEATURES: -* **New Resource**: `google_bigtable_app_profile` is now available ([#988](https://github.com/terraform-providers/terraform-provider-google-beta/issues/988)) -* **New Resource**: `google_ml_engine_model` ([#957](https://github.com/terraform-providers/terraform-provider-google-beta/issues/957)) -* **New Resource**: `google_dataproc_autoscaling_policy` ([#1078](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1078)) -* **New Data Source**: `google_kms_secret_ciphertext` ([#1011](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1011)) - -ENHANCEMENTS: -* bigquery: Add support for clustering/partitioning to bigquery_table ([#1025](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1025)) -* bigtable: `num_nodes` can now be updated in `google_bigtable_instance` ([#1067](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1067)) -* cloudiot: `resource_cloudiot_registry` now has fields plural `event_notification_configs` and `log_level`, and `event_notification_config` has been deprecated. ([#1064](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1064)) -* cloud_run: New output-only fields have been added to google_cloud_run_service's status. ([#1071](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1071)) -* compute: Adding bandwidth attribute to interconnect attachment. 
([#1016](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1016)) -* compute: `google_compute_region_instance_group_manager.update_policy` now supports `instance_redistribution_type` ([#1073](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1073)) -* compute: adds admin_enabled to google_compute_interconnect_attachment ([#1072](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1072)) -* compute: The compute routes includes next_hop_ilb attribute support in beta. ([#1076](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1076)) -* scheduler: Add support for `oauth_token` and `oidc_token` on resource `google_cloud_scheduler_job` ([#1024](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1024)) - -BUG FIXES: -* containerregistry: Correctly handle domain-scoped projects ([#1035](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1035)) -* iam: Fixed regression in 2.13.0 for permadiff on empty members in IAM policy bindings. ([#1092](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1092)) -* project: `google_project_iam_custom_role` now sets the project properly on import. ([#1089](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1089)) -* sql: Added back a missing import format for `google_sql_database`. ([#1061](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1061)) - -## 2.13.0 (August 15, 2019) - -KNOWN ISSUES: -* `bigtable`: `google_bigtable_instance` may cause a panic on Terraform `0.11`. This was resolved in `2.17.0`. - -FEATURES: -* **New Resource**: added the `google_vpc_access_connector` resource and the `vpc_connector` option on the `google_cloudfunctions_function` resource. 
([#1004](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1004)) -* **New Resource**: Added `google_scc_source` resource for managing Cloud Security Command Center sources in Terraform ([#1033](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1033)) -* **New Data Source**: `google_compute_network_endpoint_group`([#999](https://github.com/terraform-providers/terraform-provider-google-beta/issues/999)) - -ENHANCEMENTS: -* bigquery: Added support for `google_bigquery_data_transfer_config` (which include scheduled queries). ([#975](https://github.com/terraform-providers/terraform-provider-google-beta/issues/975)) -* bigtable: `google_bigtable_instance` max number of `cluster` blocks is now 4 ([#995](https://github.com/terraform-providers/terraform-provider-google-beta/issues/995)) -* binary_authorization: Added `globalPolicyEvaluationMode` to `google_binary_authorization_policy`. ([#987](https://github.com/terraform-providers/terraform-provider-google-beta/issues/987)) -* cloudfunctions: Allow partial URIs in google_cloudfunctions_function event_trigger.resource ([#1009](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1009)) -* compute: Enable update for `google_compute_router_nat` ([#979](https://github.com/terraform-providers/terraform-provider-google-beta/issues/979)) -* netblock: extended `google_netblock_ip_ranges` to support multiple useful IP address ranges that have a special meaning on GCP. 
([#986](https://github.com/terraform-providers/terraform-provider-google-beta/issues/986)) -* project: Wrapped API requests with retries for `google_project`, `google_folder`, and `google_*_organization_policy` ([#971](https://github.com/terraform-providers/terraform-provider-google-beta/issues/971)) -* project: IAM and service requests are now batched ([#1014](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1014)) -* provider: allow provider's region to be specified as a self_link ([#1022](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1022)) -* provider: Adds new provider-level field `user_project_override`, which allows billing, quota checks, and service enablement checks to occur against the project a resource is in instead of the project the credentials are from. ([#1010](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1010)) -* pubsub: Pub/Sub topic geo restriction support. ([#989](https://github.com/terraform-providers/terraform-provider-google-beta/issues/989)) - -BUG FIXES: -* binary_authorization: don't diff when attestation authority note public keys don't have an ID in the config ([#1042](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1042)) -* compute: instance descriptions will now be stored in state ([#990](https://github.com/terraform-providers/terraform-provider-google-beta/issues/990)) -* container: `key_name` in `google_container_cluster.database_encryption` is no longer a required field. ([#1032](https://github.com/terraform-providers/terraform-provider-google-beta/issues/1032)) -* project: ignore errors when deleting a default network that doesn't exist ([#991](https://github.com/terraform-providers/terraform-provider-google-beta/issues/991)) - -## 2.12.0 (August 01, 2019) - -FEATURES: -* **New Data Source**: `google_kms_crypto_key_version` - Provides access to KMS key version data with Google Cloud KMS. 
([#964](https://github.com/terraform-providers/terraform-provider-google-beta/issues/964)) -* **New Resource**: `google_cloud_run_service` - Set up a cloud run service ([#757](https://github.com/terraform-providers/terraform-provider-google-beta/issues/757)) -* **New Resource**: `google_cloud_run_domain_mapping` - Allows custom domains to map to a cloud run service ([#757](https://github.com/terraform-providers/terraform-provider-google-beta/issues/757)) - -ENHANCEMENTS: -* binary_authorization: Add support for Cloud KMS PKIX keys to `binary_authorization_attestor`. ([#964](https://github.com/terraform-providers/terraform-provider-google-beta/issues/964)) -* composer: Add private IP config for `google_composer_environment` ([#908](https://github.com/terraform-providers/terraform-provider-google-beta/issues/908)) -* compute: add support for port_specification to resource `google_compute_health_check` ([#933](https://github.com/terraform-providers/terraform-provider-google-beta/issues/933)) -* compute: Fixed import formats for `google_compute_network_endpoint` and add location-only import formats ([#947](https://github.com/terraform-providers/terraform-provider-google-beta/issues/947)) -* compute: add support for `resource_policies` to resource `google_compute_disk` ([#960](https://github.com/terraform-providers/terraform-provider-google-beta/issues/960)) -* compute: Support labelling for compute_instance boot_disks and compute_instance_template disks. ([#982](https://github.com/terraform-providers/terraform-provider-google-beta/issues/982)) -* container: `workload_identity_config` in `google_container_cluster` can now be updated without recreating the cluster. 
([#896](https://github.com/terraform-providers/terraform-provider-google-beta/issues/896)) -* container: validate that master_ipv4_cidr_block is set if enable_private_nodes is true ([#948](https://github.com/terraform-providers/terraform-provider-google-beta/issues/948)) -* dataflow: added support for user-defined `labels` on resource `google_dataflow_job` ([#970](https://github.com/terraform-providers/terraform-provider-google-beta/issues/970)) -* dataproc: add support for `optional_components` to resource `resource_dataproc_cluster` ([#961](https://github.com/terraform-providers/terraform-provider-google-beta/issues/961)) -* project: add checks to import to prevent importing by project number instead of id ([#954](https://github.com/terraform-providers/terraform-provider-google-beta/issues/954)) -* storage: add support for `retention_policy` to resource `google_storage_bucket` ([#949](https://github.com/terraform-providers/terraform-provider-google-beta/issues/949)) - -BUG FIXES: -* access_context_manager: import format checking ([#952](https://github.com/terraform-providers/terraform-provider-google-beta/issues/952)) -* dataproc: Suppress diff for `google_dataproc_cluster` `software_config.0.image_version` to prevent permadiff when server uses more specific versions of config value ([#969](https://github.com/terraform-providers/terraform-provider-google-beta/issues/969)) -* organization: Add auditConfigs to update masks for setting org and folder IAM policy (`google_organization_iam_policy`, `google_folder_iam_policy`) ([#967](https://github.com/terraform-providers/terraform-provider-google-beta/issues/967)) -* storage: `google_storage_bucket` Set website metadata during read ([#925](https://github.com/terraform-providers/terraform-provider-google-beta/issues/925)) - -## 2.11.0 (July 16, 2019) - -NOTES: -* container: We have changed the way container clusters handle cluster state, and they should now wait until the cluster is ready when creating, updating, or 
refreshing cluster state. This is meant to decrease the frequency of errors where Terraform is operating on a cluster that isn't ready to be operated on. If this change causes a problem, please open an issue with as much information as you can provide, especially [debug logs](https://www.terraform.io/docs/internals/debugging.html). See [terraform-provider-google #3989](https://github.com/terraform-providers/terraform-provider-google/issues/3989) for more info. - -FEATURES: -* **New Resources**: `google_bigtable_instance_iam_binding`, `google_bigtable_instance_iam_member`, and `google_bigtable_instance_iam_policy` are now available. ([#923](https://github.com/terraform-providers/terraform-provider-google-beta/issues/923)) -* **New Resources**: `google_sourcerepo_repository_iam_*` Add support for source repo repository IAM resources ([#914](https://github.com/terraform-providers/terraform-provider-google-beta/issues/914)) - -ENHANCEMENTS: -* bigquery: Added support for `external_data_configuration` to `google_bigquery_table`. ([#696](https://github.com/terraform-providers/terraform-provider-google-beta/issues/696)) -* compute: Avoid getting project if no diff found for google_compute_instance_template ([#932](https://github.com/terraform-providers/terraform-provider-google-beta/issues/932)) -* firestore: `google_firestore_index` `query_scope` can have `COLLECTION_GROUP` specified. 
([#919](https://github.com/terraform-providers/terraform-provider-google-beta/issues/919)) - -BUG FIXES: -* compute: Mark instance KMS self link field kms_key_self_link as computed ([#819](https://github.com/terraform-providers/terraform-provider-google-beta/issues/819)) -* compute: Allow security policy to be removed from `google_backend_service` ([#916](https://github.com/terraform-providers/terraform-provider-google-beta/issues/916)) -* container: `google_container_cluster` deeper nil checks to prevent crash on empty object ([#934](https://github.com/terraform-providers/terraform-provider-google-beta/issues/934)) -* container: `google_container_cluster` keep clusters in state if they are created in an error state and don't get correctly cleaned up. ([#929](https://github.com/terraform-providers/terraform-provider-google-beta/issues/929)) -* container: `google_container_node_pool` Correctly set nodepool autoscaling in state when disabled in the API ([#931](https://github.com/terraform-providers/terraform-provider-google-beta/issues/931)) -* container: `google_container_cluster` will now wait to act until the cluster can be operated on, respecting timeouts. ([#927](https://github.com/terraform-providers/terraform-provider-google-beta/issues/927)) -* monitoring: Fix diff in `google_monitoring_uptime_check_config` on a deprecated field. ([#944](https://github.com/terraform-providers/terraform-provider-google-beta/issues/944)) -* service: `google_service_networking_connection` correctly delete the connection when the resource is destroyed. ([#935](https://github.com/terraform-providers/terraform-provider-google-beta/issues/935)) -* spanner: Wait for spanner databases to create before returning. Don't wait for databases to delete before returning anymore. ([#922](https://github.com/terraform-providers/terraform-provider-google-beta/issues/922)) -* storage: Fixed an issue where `google_storage_transfer_job` `schedule_end_date` caused requests to fail if unset. 
([#936](https://github.com/terraform-providers/terraform-provider-google-beta/issues/936)) -* storage: `google_storage_object_acl` Prevent panic when using interpolated object names. ([#917](https://github.com/terraform-providers/terraform-provider-google-beta/issues/917)) - - -## 2.10.0 (July 02, 2019) - -DEPRECATIONS: -* monitoring: Deprecated non-existent fields `is_internal` and `internal_checkers` from `google_monitoring_uptime_check_config`. ([#888](https://github.com/terraform-providers/terraform-provider-google-beta/issues/888)) - -FEATURES: -* **New Resource**: `google_compute_project_default_network_tier` ([#882](https://github.com/terraform-providers/terraform-provider-google-beta/issues/882)) -* **New Resource** `google_healthcare_dataset_iam_binding` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_dataset_iam_member` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_dataset_iam_policy` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_dicom_store_iam_binding` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_dicom_store_iam_member` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_dicom_store_iam_policy` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_fhir_store_iam_binding` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_fhir_store_iam_member` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_fhir_store_iam_policy` 
([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_hl7_v2_store_iam_binding` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_hl7_v2_store_iam_member` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) -* **New Resource** `google_healthcare_hl7_v2_store_iam_policy` ([#899](https://github.com/terraform-providers/terraform-provider-google-beta/pull/899)) - -ENHANCEMENTS: -* compute: Added fields for managing network endpoint group backends in `google_compute_backend_service`, including `max_connections_per_endpoint` and `max_rate_per_endpoint` ([#854](https://github.com/terraform-providers/terraform-provider-google-beta/issues/854)) -* compute: Support custom timeouts in `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager` ([#909](https://github.com/terraform-providers/terraform-provider-google-beta/issues/909)) -* container: `node_config.sandbox_config` is supported on GKE node pool definitions, allowing you to configure GKE Sandbox. ([#863](https://github.com/terraform-providers/terraform-provider-google-beta/issues/863)) -* container: `google_container_cluster` add support for GKE resource usage ([#825](https://github.com/terraform-providers/terraform-provider-google-beta/issues/825)) -* folder: `google_folder` improve error message on delete ([#878](https://github.com/terraform-providers/terraform-provider-google-beta/issues/878)) -* iam: sort bindings in `google_*_iam_policy` resources to get simpler diffs ([#881](https://github.com/terraform-providers/terraform-provider-google-beta/issues/881)) -* kms: `google_kms_crypto_key` now supports labels. ([#885](https://github.com/terraform-providers/terraform-provider-google-beta/issues/885)) -* pubsub: `google_pubsub_topic` supports KMS keys with `kms_key_name`. 
([#894](https://github.com/terraform-providers/terraform-provider-google-beta/issues/894)) - -BUG FIXES: -* iam: the member field in iam_* resources is now case-insensitive ([#876](https://github.com/terraform-providers/terraform-provider-google-beta/issues/876)) -* servicenetworking: `google_service_networking_connection` fix update ([#871](https://github.com/terraform-providers/terraform-provider-google-beta/issues/871)) - -## 2.9.1 (June 21, 2019) - -BUG FIXES: -* kms: fix regression when reading existing `google_kms_crypto_key` resources ([#873](https://github.com/terraform-providers/terraform-provider-google-beta/issues/873)) -* storage: `google_storage_bucket` fix for crash that occurs when running plan on old buckets ([#870](https://github.com/terraform-providers/terraform-provider-google-beta/issues/870)) -* storage: `google_storage_bucket` allow updating bucket_policy_only to false ([#870](https://github.com/terraform-providers/terraform-provider-google-beta/issues/870)) - -## 2.9.0 (June 19, 2019) - -FEATURES: -* **Custom Endpoint Support**: The Google provider supports custom endpoints, allowing you to use GCP-like APIs such as emulators. See the [Provider Reference](https://www.terraform.io/docs/providers/google/provider_reference.html) for details. ([#811](https://github.com/terraform-providers/terraform-provider-google-beta/issues/811)) -* **New Resource**: `google_compute_resource_policy` is now available which can be used to schedule disk snapshots. ([#1850](https://github.com/GoogleCloudPlatform/magic-modules/pull/1850)) -* **New Resource**: `google_compute_external_vpn_gateway` is now available which can be used to connect to external VPN gateways. ([#833](https://github.com/terraform-providers/terraform-provider-google-beta/issues/833)) -* **New Resource** Network endpoint groups (`google_compute_network_endpoint_group`) and fine-grained resource endpoints (`google_compute_network_endpoint`) are now available. 
([#781](https://github.com/terraform-providers/terraform-provider-google-beta/issues/781)) - -ENHANCEMENTS: -* increased default timeouts for `google_compute_instance`, `google_container_cluster`, `google_dataproc_cluster`, and `google_sql_database_instance` ([#862](https://github.com/terraform-providers/terraform-provider-google-beta/issues/862)) -* container: `google_container_cluster` Stop guest_accelerator from having a permadiff for accelerators with `count=0` ([#851](https://github.com/terraform-providers/terraform-provider-google-beta/issues/851)) -* container: `google_container_cluster` supports `authenticator_groups_config` to allow Google Groups-based authentication. ([#669](https://github.com/terraform-providers/terraform-provider-google-beta/issues/669)) -* container: `google_container_cluster` supports `enable_intranode_visibility`. ([#801](https://github.com/terraform-providers/terraform-provider-google-beta/issues/801)) -* container: `google_container_cluster` supports Workload Identity to access GCP APIs in GKE applications with `workload_identity_config`. ([#824](https://github.com/terraform-providers/terraform-provider-google-beta/issues/824)) -* dataproc: `google_dataproc_cluster` supports `min_cpu_platform` ([#424](https://github.com/terraform-providers/terraform-provider-google-beta/issues/424), [#848](https://github.com/terraform-providers/terraform-provider-google-beta/issues/848)) -* dns: `google_dns_record_set`: allow importing dns record sets in any project ([#853](https://github.com/terraform-providers/terraform-provider-google-beta/issues/853)) -* kms: `kms_crypto_key` supports `purpose` ([#845](https://github.com/terraform-providers/terraform-provider-google-beta/issues/845)) -* storage: `google_storage_bucket` now supports enabling `bucket_policy_only` access control. 
([#1878](https://github.com/GoogleCloudPlatform/magic-modules/pull/1878)) -* storage: IAM resources for storage buckets (`google_storage_bucket_iam_*`) now all support import ([#835](https://github.com/terraform-providers/terraform-provider-google-beta/issues/835)) -* pubsub: `google_pubsub_topic` Updates for labels are now supported ([#832](https://github.com/terraform-providers/terraform-provider-google-beta/issues/832)) - -BUG FIXES: -* bigquery: `google_bigquery_dataset` Relax IAM role restrictions on BQ datasets ([#857](https://github.com/terraform-providers/terraform-provider-google-beta/issues/857)) -* compute: `google_project_iam` When importing resources `project` no longer needs to be set in the config post import ([#805](https://github.com/terraform-providers/terraform-provider-google-beta/issues/805)) -* compute: `google_sql_user` Users can now be updated to change their password ([#810](https://github.com/terraform-providers/terraform-provider-google-beta/issues/810)) -* compute: `google_compute_instance_template` Fixed issue so project can now be specified by interpolated variables. ([#816](https://github.com/terraform-providers/terraform-provider-google-beta/issues/816)) -* compute: `google_compute_instance_template` Throw error when using incompatible disk fields instead of continual plan diff ([#812](https://github.com/terraform-providers/terraform-provider-google-beta/issues/812)) -* compute: `google_compute_instance_from_template` Make sure disk type is expanded to a URL ([#771](https://github.com/terraform-providers/terraform-provider-google-beta/issues/771)) -* compute: `google_compute_instance_template` Attempt to put disks in state in the same order they were specified ([#771](https://github.com/terraform-providers/terraform-provider-google-beta/issues/771)) -* container: `google_container_cluster` and `google_node_pool` now retry correctly when polling for status of an operation. 
([#818](https://github.com/terraform-providers/terraform-provider-google-beta/issues/818)) -* container: `google_container_cluster` `istio_config.auth` will no longer permadiff on `AUTH_NONE` when an auth method other than TLS is defined. ([#834](https://github.com/terraform-providers/terraform-provider-google-beta/issues/834)) -* dns: `google_dns_record_set` overrides all existing record types on create, not just NS ([#850](https://github.com/terraform-providers/terraform-provider-google-beta/issues/850)) -* monitoring: `google_monitoring_notification_channel` Allow setting enabled to false ([#864](https://github.com/terraform-providers/terraform-provider-google-beta/issues/864)) -* pubsub: `google_pubsub_subscription` and `google_pubsub_topic` resources can be created inside VPC service controls. ([#827](https://github.com/terraform-providers/terraform-provider-google-beta/issues/827)) -* redis: `google_redis_instance` Fall back to region from `location_id` when region isn't specified ([#847](https://github.com/terraform-providers/terraform-provider-google-beta/issues/847)) - -## 2.8.0 (June 04, 2019) - -DEPRECATIONS: -* compute: The `auto_create_routes` field on `google_compute_network_peering` has been deprecated because it is not user configurable. ([#3394](https://github.com/terraform-providers/terraform-provider-google/issues/3394)) - -FEATURES: -* **New Resource**: `google_compute_ha_vpn_gateway` is now available. This is an alternative to `google_compute_vpn_gateway` that can be set up to provide higher availability. 
([#704](https://github.com/terraform-providers/terraform-provider-google-beta/pull/704)) -* **New Datasource**: `google_compute_ssl_certificate` ([#742](https://github.com/terraform-providers/terraform-provider-google-beta/pull/742)) -* **New Datasource**: `google_composer_image_versions` ([#752](https://github.com/terraform-providers/terraform-provider-google-beta/pull/752)) - -ENHANCEMENTS: -* app_engine: Remove restrictive `app_engine_application` location validation. ([#760](https://github.com/terraform-providers/terraform-provider-google-beta/pull/760)) -* compute: `google_compute_vpn_tunnel` supports HA fields `vpn_gateway`, `vpn_gateway_interface`, `peer_gcp_gateway`, `peer_external_gateway`, `vpn_gateway_interface` ([#704](https://github.com/terraform-providers/terraform-provider-google-beta/pull/704)) -* compute: `google_container_cluster` add support for vertical pod autoscaling ([#749](https://github.com/terraform-providers/terraform-provider-google-beta/issues/749)) -* compute: `google_compute_router_interface` now supports specifying an `interconnect_attachment`. ([#769](https://github.com/terraform-providers/terraform-provider-google-beta/pull/769)) -* compute: `google_compute_router_nat` now supports specifying a `log_config` block. ([#743](https://github.com/terraform-providers/terraform-provider-google-beta/pull/743)) -* compute: `google_compute_router_nat` now supports more import formats. 
([#785](https://github.com/terraform-providers/terraform-provider-google-beta/pull/785)) -* compute: `google_compute_network_peering` now supports importing/exporting custom routes ([#754](https://github.com/terraform-providers/terraform-provider-google-beta/pull/754)) -* compute: `google_compute_backend_service` now supports self-managed internal load balancing ([#772](https://github.com/terraform-providers/terraform-provider-google-beta/issues/772)) -* compute: `google_compute_region_backend_service` now supports failover policies ([#789](https://github.com/terraform-providers/terraform-provider-google-beta/pull/789)) -* compute: Add support for INTERNAL_SELF_MANAGED backend service. Changed Resources: `google_compute_backend_service`, `google_compute_global_forwarding_rule`. ([#772](https://github.com/terraform-providers/terraform-provider-google-beta/pull/772)) -* composer: Make cloud composer environment image version updateable ([#741](https://github.com/terraform-providers/terraform-provider-google-beta/pull/741)) -* container: `google_container_cluster` now supports `vertical_pod_autoscaling` ([#733](https://github.com/terraform-providers/terraform-provider-google-beta/pull/733)) -* container: Expose the `services_ipv4_cidr` for `container_cluster`. ([#804](https://github.com/terraform-providers/terraform-provider-google-beta/pull/804)) -* dataflow: `google_dataflow_job` now supports setting machine type ([#1862](https://github.com/GoogleCloudPlatform/magic-modules/pull/1862)) -* dns: `google_dns_managed_zone` now supports DNSSec ([#737](https://github.com/terraform-providers/terraform-provider-google-beta/pull/737)) -* kms: `google_kms_key_ring` is now autogenerated. ([#748](https://github.com/terraform-providers/terraform-provider-google-beta/pull/748)) -* pubsub: `google_pubsub_subscription` supports setting an `expiration_policy` with no `ttl`. 
([#783](https://github.com/terraform-providers/terraform-provider-google-beta/pull/783)) - -BUG FIXES: -* binauth: `google_binary_authorization_policy` can be used with attestors in another project. ([#778](https://github.com/terraform-providers/terraform-provider-google-beta/pull/778)) -* compute: allow setting firewall priority to 0 ([#755](https://github.com/terraform-providers/terraform-provider-google-beta/pull/755)) -* compute: Resolved an issue where `google_compute_region_backend_service` was unable to perform a state migration. ([#775](https://github.com/terraform-providers/terraform-provider-google-beta/pull/775)) -* compute: allow empty metadata.startup-script on instances ([#776](https://github.com/terraform-providers/terraform-provider-google-beta/pull/776)) -* compute: Fix flattened custom patchable resources in `google_compute_network`. ([#782](https://github.com/terraform-providers/terraform-provider-google-beta/pull/782)) -* compute: `google_compute_vpn_tunnel` now supports sending an empty external gateway interface id. ([#759](https://github.com/terraform-providers/terraform-provider-google-beta/pull/759)) -* container: allow AUTH_NONE in istio addon_config ([#664](https://github.com/terraform-providers/terraform-provider-google-beta/pull/664)) -* container: allow going from no ip_allocation_policy to a blank-equivalent one ([#774](https://github.com/terraform-providers/terraform-provider-google-beta/pull/774)) -* container: `google_container_cluster` will no longer diff unnecessarily on `issue_client_certificate`. ([#788](https://github.com/terraform-providers/terraform-provider-google-beta/pull/788)) -* container: `google_container_cluster` can enable client certificates on GKE `1.12+` series releases. 
([#788](https://github.com/terraform-providers/terraform-provider-google-beta/pull/788)) -* container: `google_container_cluster` now retries the call to remove default node pools during cluster creation ([#799](https://github.com/terraform-providers/terraform-provider-google-beta/pull/799)) -* storage: Fix occasional crash when updating storage buckets ([#706](https://github.com/terraform-providers/terraform-provider-google-beta/pull/706)) - -## 2.7.0 (May 21, 2019) - -NOTE: -* Several resources were previously undocumented on the site or changelog; they should be added to both with this release. `google_compute_backend_bucket_signed_url_key` and `google_compute_backend_service_signed_url_key` were introduced in `2.4.0`. - -BACKWARDS INCOMPATIBILITIES: -* cloudfunctions: `google_cloudfunctions_function.runtime` now has an explicit default value of `nodejs6`. Users who have a different value set in the API but the value undefined in their config will see a diff. ([#697](https://github.com/terraform-providers/terraform-provider-google-beta/issues/697)) - -FEATURES: -* **New Resources**: `google_compute_instance_iam_binding`, `google_compute_instance_iam_member`, and `google_compute_instance_iam_policy` are now available. ([#685](https://github.com/terraform-providers/terraform-provider-google-beta/pull/685)) -* **New Resources**: IAM resources for Dataproc jobs and clusters (`google_dataproc_job_iam_policy`, `google_dataproc_job_iam_member`, `google_dataproc_job_iam_binding`, `google_dataproc_cluster_iam_policy`, `google_dataproc_cluster_iam_member`, `google_dataproc_cluster_iam_binding`) are now available. [#709](https://github.com/terraform-providers/terraform-provider-google-beta/pull/709) -* **New Resources**: `google_iap_tunnel_instance_iam_binding`, `google_iap_tunnel_instance_iam_member`, and `google_iap_tunnel_instance_iam_policy` are now available. 
([#687](https://github.com/terraform-providers/terraform-provider-google-beta/issues/687)) - -ENHANCEMENTS: -* provider: Add GCP zone to `google_client_config` datasource ([#668](https://github.com/terraform-providers/terraform-provider-google-beta/issues/668)) -* compute: Add support for creating instances with CMEK ([#698](https://github.com/terraform-providers/terraform-provider-google-beta/issues/698)) -* compute: Can now specify project when importing instance groups. -* compute: `google_compute_instance` now supports `shielded_instance_config` for verifiable integrity of your VM instances. ([#711](https://github.com/terraform-providers/terraform-provider-google-beta/issues/711)) -* compute: `google_compute_backend_service` now supports `HTTP2` protocol (beta API feature) [#708](https://github.com/terraform-providers/terraform-provider-google-beta/pull/708) -* compute: `google_compute_instance_template` now supports `shielded_instance_config` for verifiable integrity of your VM instances. ([#711](https://github.com/terraform-providers/terraform-provider-google-beta/issues/711)) -* container: use the cluster subnet to look up the node cidr block ([#722](https://github.com/terraform-providers/terraform-provider-google-beta/issues/722)) - -BUG FIXES: -* cloudfunctions: `google_cloudfunctions_function.runtime` now has an explicit default value of `nodejs6`. ([#697](https://github.com/terraform-providers/terraform-provider-google-beta/issues/697)) -* monitoring: updating `google_monitoring_alert_policy` is more likely to succeed ([#684](https://github.com/terraform-providers/terraform-provider-google-beta/issues/684)) -* kms: `google_kms_crypto_key` now (in addition to marking all crypto key versions for destruction) correctly disables auto-rotation for destroyed keys ([#705](https://github.com/terraform-providers/terraform-provider-google-beta/issues/705)) -* iam: Increase IAM custom role length validation to match API. 
([#728](https://github.com/terraform-providers/terraform-provider-google-beta/issues/728)) - -## 2.6.0 (May 07, 2019) - -KNOWN ISSUES: -* cloudfunctions: `google_cloudfunctions_function`s without a `runtime` set will fail to create due to an upstream API change. You can work around this by setting an explicit `runtime` in `2.X` series releases. - -DEPRECATIONS: -* monitoring: `google_monitoring_alert_policy` `labels` was deprecated, as the field was never used and it was typed incorrectly. ([#635](https://github.com/terraform-providers/terraform-provider-google-beta/issues/635)) - -FEATURES: -* **New Datasource**: `google_compute_node_types` for sole-tenant node types is now available. ([#614](https://github.com/terraform-providers/terraform-provider-google-beta/pull/614)) -* **New Resource**: `google_compute_node_group` for sole-tenant nodes is now available. ([#643](https://github.com/terraform-providers/terraform-provider-google-beta/pull/643)) -* **New Resource**: `google_compute_node_template` for sole-tenant nodes is now available. ([#614](https://github.com/terraform-providers/terraform-provider-google-beta/pull/614)) -* **New Resource**: `google_firestore_index` is now available to configure composite indexes on Firestore. ([#632](https://github.com/terraform-providers/terraform-provider-google-beta/issues/632)) -* **New Resource**: `google_logging_metric` is now available to configure Stackdriver logs-based metrics. ([#1702](https://github.com/GoogleCloudPlatform/magic-modules/pull/1702)) -* **New Resource**: `google_compute_network_endpoint_group` ([#630](https://github.com/terraform-providers/terraform-provider-google-beta/issues/630)) -* **New Resource**: `google_security_scanner_scan_config` is now available for configuring scan runs with Cloud Security Scanner. 
([#641](https://github.com/terraform-providers/terraform-provider-google-beta/issues/641)) - -ENHANCEMENTS: -* compute: `google_compute_subnetwork` now supports `log_config` to configure flow logs' logging behaviour. ([#619](https://github.com/terraform-providers/terraform-provider-google-beta/issues/619)) -* container: `google_container_cluster` now supports `database_encryption` to configure etcd encryption. ([#649](https://github.com/terraform-providers/terraform-provider-google-beta/issues/649)) -* dataflow: `google_dataflow_job`'s `network` and `subnetwork` can be configured. ([#631](https://github.com/terraform-providers/terraform-provider-google-beta/issues/631)) -* monitoring: `google_monitoring_alert_policy` `user_labels` support was added. ([#635](https://github.com/terraform-providers/terraform-provider-google-beta/issues/635)) -* compute: `google_compute_region_backend_service` is now generated with Magic Modules, adding configurable timeouts, multiple import formats, `creation_timestamp` output. ([#645](https://github.com/terraform-providers/terraform-provider-google-beta/issues/645)) -* compute: `iam_compute_subnetwork` is now GA. ([#656](https://github.com/terraform-providers/terraform-provider-google-beta/issues/656)) -* pubsub: `google_pubsub_subscription` now supports setting an `expiration_policy`. ([#1703](https://github.com/GoogleCloudPlatform/magic-modules/pull/1703)) - -BUG FIXES: -* bigquery: `google_bigquery_table` will work with a larger range of projects id formats. ([#658](https://github.com/terraform-providers/terraform-provider-google-beta/issues/658)) -* cloudfunctions: `google_cloudfunctions_function` no longer restricts an outdated list of `region`s ([#659](https://github.com/terraform-providers/terraform-provider-google-beta/issues/659)) -* compute: `google_compute_instance` now retries updating metadata when fingerprints are mismatched. 
([#583](https://github.com/terraform-providers/terraform-provider-google-beta/issues/583)) -* compute: `google_compute_instance` and `google_compute_instance_template` now support node affinities for scheduling on sole tenant nodes [[#663](https://github.com/terraform-providers/terraform-provider-google-beta/issues/663)](https://github.com/terraform-providers/terraform-provider-google-beta/pull/663) -* compute: `google_compute_managed_ssl_certificate` will no longer diff when using an absolute FQDN. ([#591](https://github.com/terraform-providers/terraform-provider-google-beta/issues/591)) -* compute: `google_compute_disk` resources using `google-beta` will properly detach users at deletion instead of failing. ([#640](https://github.com/terraform-providers/terraform-provider-google-beta/issues/640)) -* compute: `google_compute_subnetwork.secondary_ip_ranges` doesn't cause a diff on out of band changes, allows updating to empty list of ranges. ([#3496](https://github.com/terraform-providers/terraform-provider-google-beta/issues/3496)) -* container: `google_container_cluster` setting networks / subnetworks by name works with `location`. ([#634](https://github.com/terraform-providers/terraform-provider-google-beta/issues/634)) -* container: `google_container_cluster` removed an overly restrictive validation restricting `node_pool` and `remove_default_node_pool` being specified at the same time. ([#637](https://github.com/terraform-providers/terraform-provider-google-beta/issues/637)) -* storage: `data_source_google_storage_bucket_object` now correctly URL encodes the slashes in a file name ([#587](https://github.com/terraform-providers/terraform-provider-google-beta/issues/587)) - -## 2.5.1 (April 22, 2019) - -BUG FIXES: -* compute: `google_compute_backend_service` handles empty/nil `iap` block created by previous providers properly. 
([#622](https://github.com/terraform-providers/terraform-provider-google-beta/issues/622)) -* compute: `google_compute_backend_service` allows multiple instance types in `backends.group` again. ([#625](https://github.com/terraform-providers/terraform-provider-google-beta/issues/625)) -* dns: `google_dns_managed_zone` does not permadiff when visibility is set to default and returned as empty from API ([#624](https://github.com/terraform-providers/terraform-provider-google-beta/issues/624)) -* google_projects: Datasource `google_projects` now handles paginated results from listing projects ([#626](https://github.com/terraform-providers/terraform-provider-google-beta/pull/626)) -* google_project_iam: `google_project_iam_policy/member/binding` now attempts to retry for read-only operations as well as retrying read-write operations ([#620](https://github.com/terraform-providers/terraform-provider-google-beta/pull/620)) -* kms: `google_kms_crypto_key.rotation_period` now can be an empty string to allow for unset behavior in modules ([#627](https://github.com/terraform-providers/terraform-provider-google-beta/pull/627)) - -## 2.5.0 (April 18, 2019) - - -KNOWN ISSUES: -* compute: `google_compute_subnetwork` will fail to reorder `secondary_ip_range` values at apply time -* compute: `google_compute_subnetwork`s used with a VPC-native GKE cluster will have a diff if that cluster creates secondary ranges automatically. - -BACKWARDS INCOMPATIBILITIES: -* all: This is the first release to use the 0.12 SDK required for Terraform 0.12 support. Some provider behaviour may have changed as a result of changes made by the new SDK version. -* compute: `google_compute_instance_group` will not reconcile instances recreated within the same `terraform apply` due to underlying `0.12` SDK changes in the provider. 
([#616](https://github.com/terraform-providers/terraform-provider-google-beta/issues/616)) -* compute: `google_compute_subnetwork` will have a diff if `secondary_ip_range` values defined in config don't exactly match real state; if so, they will need to be reconciled. ([#3432](https://github.com/terraform-providers/terraform-provider-google-beta/issues/3432)) -* container: `google_container_cluster` will have a diff if `master_authorized_networks.cidr_blocks` defined in config doesn't exactly match the real state; if so, it will need to be reconciled. ([#603](https://github.com/terraform-providers/terraform-provider-google-beta/issues/603)) - - -BUG FIXES: -* container: `google_container_cluster` catch out of band changes to `master_authorized_networks.cidr_blocks`. ([#603](https://github.com/terraform-providers/terraform-provider-google-beta/issues/603)) - - -## 2.4.1 (April 30, 2019) - -NOTES: This 2.4.1 release is a bugfix release for 2.4.0. It backports the fixes applied in the 2.5.1 release to the 2.4.0 series. - -BUG FIXES: -* compute: `google_compute_backend_service` handles empty/nil `iap` block created by previous providers properly. ([#622](https://github.com/terraform-providers/terraform-provider-google-beta/issues/622)) -* compute: `google_compute_backend_service` allows multiple instance types in `backends.group` again. ([#625](https://github.com/terraform-providers/terraform-provider-google-beta/issues/625)) -* dns: `google_dns_managed_zone` does not permadiff when visibility is set to default and returned as empty from API ([#624](https://github.com/terraform-providers/terraform-provider-google-beta/issues/624)) - -## 2.4.0 (April 15, 2019) - -KNOWN ISSUES: - -* compute: `google_compute_backend_service` resources created with past provider versions won't work with `2.4.0`. You can pin your provider version or manually delete them and recreate them until this is resolved. 
(https://github.com/terraform-providers/terraform-provider-google/issues/3441) -* dns: `google_dns_managed_zone.visibility` will cause a diff if set to `public`. Setting it to `""` (defaulting to public) will work around this. (https://github.com/terraform-providers/terraform-provider-google/issues/3435) - -BACKWARDS INCOMPATIBILITIES: -* accesscontextmanager: `google_access_context_manager_service_perimeter` `unrestricted_services` field was removed based on a removal in the underlying API. ([#576](https://github.com/terraform-providers/terraform-provider-google-beta/issues/576)) - -FEATURES: -* **New Resource**: `google_compute_backend_bucket_signed_url_key` is now available. ([#530](https://github.com/terraform-providers/terraform-provider-google-beta/issues/530)) -* **New Resource**: `google_compute_backend_service_signed_url_key` is now available. ([#577](https://github.com/terraform-providers/terraform-provider-google-beta/issues/577)) -* **New Datasource**: `google_service_account_access_token` is now available. ([#575](https://github.com/terraform-providers/terraform-provider-google-beta/issues/575)) - -ENHANCEMENTS: -* compute: `google_compute_backend_service` is now generated with Magic Modules, adding configurable timeouts, multiple import formats, `creation_timestamp` output. ([#569](https://github.com/terraform-providers/terraform-provider-google-beta/issues/569)) -* compute: `google_compute_backend_service` now supports `load_balancing_scheme` and `cdn_policy.signed_url_cache_max_age_sec`. ([#584](https://github.com/terraform-providers/terraform-provider-google-beta/issues/584)) -* compute: `google_compute_network` now supports `delete_default_routes_on_create` to delete pre-created routes at network creation time. 
([#592](https://github.com/terraform-providers/terraform-provider-google-beta/issues/592)) -* compute: `google_compute_autoscaler` now supports `metric.single_instance_assignment` ([#580](https://github.com/terraform-providers/terraform-provider-google-beta/issues/580)) -* dns: `google_dns_policy` now supports `enable_logging`. ([#573](https://github.com/terraform-providers/terraform-provider-google-beta/issues/573)) -* dns: `google_dns_managed_zone` now supports `peering_config` to enable DNS Peering. ([#572](https://github.com/terraform-providers/terraform-provider-google-beta/issues/572)) - -BUG FIXES: -* container: `google_container_cluster` will ignore out of band changes on `node_ipv4_cidr_block`. ([#558](https://github.com/terraform-providers/terraform-provider-google-beta/issues/558)) -* container: `google_container_cluster` will now reject config with both `node_pool` and `remove_default_node_pool` defined ([#600](https://github.com/terraform-providers/terraform-provider-google-beta/issues/600)) -* container: `google_container_cluster` will allow >20 `cidr_blocks` in `master_authorized_networks_config`. ([#594](https://github.com/terraform-providers/terraform-provider-google-beta/issues/594)) -* netblock: `data.google_netblock_ip_ranges.cidr_blocks` will better handle ipv6 input. ([#590](https://github.com/terraform-providers/terraform-provider-google-beta/issues/590)) -* sql: `google_sql_database_instance` will retry reads during Terraform refreshes if it hits a rate limit. ([#579](https://github.com/terraform-providers/terraform-provider-google-beta/issues/579)) - -## 2.3.0 (March 26, 2019) - -DEPRECATIONS: -* container: `google_container_cluster` `zone` and `region` fields are deprecated in favour of `location`, `additional_zones` in favour of `node_locations`. 
([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* container: `google_container_node_pool` `zone` and `region` fields are deprecated in favour of `location`. ([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* container: `data.google_container_cluster` `zone` and `region` fields are deprecated in favour of `location`. ([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* container: `google_container_engine_versions` `zone` and `region` fields are deprecated in favour of `location`. ([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) - -FEATURES: -* **New Datasource**: `google_*_organization_policy` Adding datasources for folder and project org policy ([#468](https://github.com/terraform-providers/terraform-provider-google-beta/issues/468)) - -ENHANCEMENTS: -* compute: `google_compute_disk`, `google_compute_region_disk` now support `physical_block_size_bytes` ([#526](https://github.com/terraform-providers/terraform-provider-google-beta/issues/526)) -* compute: `google_compute_vpn_tunnel` will properly apply labels. ([#541](https://github.com/terraform-providers/terraform-provider-google-beta/issues/541)) -* container: `google_container_cluster` adds a unified `location` field for regions and zones, `node_locations` to manage extra zones for multi-zonal clusters and specific zones for regional clusters. ([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* container: `google_container_node_pool` adds a unified `location` field for regions and zones. ([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* container: `data.google_container_cluster` adds a unified `location` field for regions and zones. 
([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* container: `google_container_engine_versions` adds a unified `location` field for regions and zones. ([#461](https://github.com/terraform-providers/terraform-provider-google-beta/issues/461)) -* dataflow: `google_dataflow_job` has support for custom service accounts with `service_account_email`. ([#527](https://github.com/terraform-providers/terraform-provider-google-beta/issues/527)) -* monitoring: `google_monitoring_uptime_check` will properly recreate to perform updates. ([#485](https://github.com/terraform-providers/terraform-provider-google-beta/issues/485)) -* resourcemanager: `google_*_organization_policy` Add import support for folder and project organization_policies ([#512](https://github.com/terraform-providers/terraform-provider-google-beta/issues/512)) -* sql: `google_sql_ssl_cert` Allow project to be specified at resource level ([#524](https://github.com/terraform-providers/terraform-provider-google-beta/issues/524)) -* storage: `google_storage_bucket` avoids calls to the compute api during import ([#529](https://github.com/terraform-providers/terraform-provider-google-beta/issues/529)) -* storage: `google_storage_bucket.storage_class` supports updating. ([#548](https://github.com/terraform-providers/terraform-provider-google-beta/issues/548)) -* various: Some import formats that previously failed will now work as documented. ([#542](https://github.com/terraform-providers/terraform-provider-google-beta/issues/542)) - -BUG FIXES: -* compute: `google_compute_disk` will properly detach instances again. ([#538](https://github.com/terraform-providers/terraform-provider-google-beta/issues/538)) -* container: `google_container_cluster`, `google_container_node_pool` properly suppress new GKE `1.12` `metadata` values. 
([#522](https://github.com/terraform-providers/terraform-provider-google-beta/issues/522)) -* various: Only 409 concurrent operation errors will be retried, and naming conflicts will not. ([#544](https://github.com/terraform-providers/terraform-provider-google-beta/issues/544)) - -## 2.2.0 (March 12, 2019) - -KNOWN ISSUES: - -* compute: `google_compute_disk` is unable to detach instances at deletion time. - ---- - -FEATURES: -* **New Datasource**: `data.google_projects` for retrieving a list of projects based on a filter. ([#493](https://github.com/terraform-providers/terraform-provider-google-beta/issues/493)) -* **New Resource**: `google_tpu_node` for Cloud TPU Nodes ([#494](https://github.com/terraform-providers/terraform-provider-google-beta/issues/494)) -* **New Resource**: `google_dns_policy` for Cloud DNS policies. ([#488](https://github.com/terraform-providers/terraform-provider-google-beta/pull/488)) - -ENHANCEMENTS: -* compute: `google_compute_disk` and `google_compute_region_disk` will now detach themselves from a more up to date set of users at delete time. ([#480](https://github.com/terraform-providers/terraform-provider-google-beta/issues/480)) -* compute: `google_compute_network` is now generated by Magic Modules, supporting configurable timeouts and more import formats. ([#509](https://github.com/terraform-providers/terraform-provider-google-beta/issues/509)) -* compute: `google_compute_firewall` will validate the maximum size of service account lists at plan time. ([#508](https://github.com/terraform-providers/terraform-provider-google-beta/issues/508)) -* container: `google_container_cluster` can now disable VPC Native clusters with `ip_allocation_policy.use_ip_aliases` ([#489](https://github.com/terraform-providers/terraform-provider-google-beta/issues/489)) -* container: `data.google_container_engine_versions` supports `version_prefix` to allow fuzzy version matching. 
Using this field, Terraform can match the latest version of a major, minor, or patch release. ([#506](https://github.com/terraform-providers/terraform-provider-google-beta/issues/506)) -* pubsub: `google_pubsub_subscription` now supports configuring `message_retention_duration` and `retain_acked_messages`. ([#503](https://github.com/terraform-providers/terraform-provider-google-beta/issues/503)) - -BUG FIXES: -* app_engine: `google_app_engine_application` correctly outputs `gcr_domain`. ([#479](https://github.com/terraform-providers/terraform-provider-google-beta/issues/479)) -* compute: `data.google_compute_subnetwork` outputs the `self_link` field again. ([#481](https://github.com/terraform-providers/terraform-provider-google-beta/issues/481)) -* compute: `google_compute_attached_disk` is now removed from state if the instance was removed. ([#497](https://github.com/terraform-providers/terraform-provider-google-beta/issues/497)) -* container: `google_container_cluster` private_cluster_config now has a diff suppress to prevent a permadiff and allows for an empty `master_ipv4_cidr_block` ([#460](https://github.com/terraform-providers/terraform-provider-google-beta/issues/460)) -* container: `google_container_cluster` import behavior fixed/documented for TF-state-only fields (`remove_default_node_pool`, `min_master_version`) ([#476](https://github.com/terraform-providers/terraform-provider-google-beta/issues/476)) ([#487](https://github.com/terraform-providers/terraform-provider-google-beta/issues/487)) ([#495](https://github.com/terraform-providers/terraform-provider-google-beta/issues/495)) -* storagetransfer: `google_storage_transfer_job` will no longer crash when accessing nil dates. ([#499](https://github.com/terraform-providers/terraform-provider-google-beta/issues/499)) - -## 2.1.0 (February 26, 2019) - -FEATURES: -* **New Resource**: Add support for `google_compute_managed_ssl_certificate`. 
([#458](https://github.com/terraform-providers/terraform-provider-google-beta/issues/458)) -* **New Datasource**: `google_client_openid_userinfo` for retrieving the `email` used to authenticate with GCP. ([#459](https://github.com/terraform-providers/terraform-provider-google-beta/issues/459)) - -ENHANCEMENTS: -* compute: `data.google_compute_subnetwork` can now be addressed by `self_link` as an alternative to the existing `name`/`region`/`project` fields. ([#429](https://github.com/terraform-providers/terraform-provider-google-beta/issues/429)) -* dns: Support for privately visible zones is added to `google_dns_managed_zone`. ([#268](https://github.com/terraform-providers/terraform-provider-google-beta/issues/268)) -* pubsub: `google_pubsub_topic` is now generated using Magic Modules, adding Open in Cloud Shell examples, configurable timeouts, and the `labels` field. ([#432](https://github.com/terraform-providers/terraform-provider-google-beta/issues/432)) -* pubsub: `google_pubsub_subscription` is now generated using Magic Modules, adding Open in Cloud Shell examples, configurable timeouts, update support, and the `labels` field. ([#432](https://github.com/terraform-providers/terraform-provider-google-beta/issues/432)) -* sql: `google_sql_database_instance` now provides `public_ip_address` and `private_ip_address` outputs of the first public and private IP of the instance respectively. ([#454](https://github.com/terraform-providers/terraform-provider-google-beta/issues/454)) - - -BUG FIXES: -* sql: `google_sql_database_instance` allows the empty string to be set for `private_network`. ([#454](https://github.com/terraform-providers/terraform-provider-google-beta/issues/454)) - -## 2.0.0 (February 12, 2019) - -BACKWARDS INCOMPATIBILITIES: -* bigtable: `google_bigtable_instance` `zone` field is no longer inferred from the provider. 
-* bigtable: `google_bigtable_table` now reads `family` from the table's column family in Cloud Bigtable instead of creating a new column family ([#70](https://github.com/terraform-providers/terraform-provider-google-beta/issues/70)) -* bigtable: `google_bigtable_instance.cluster.num_nodes` will fail at plan time if `DEVELOPMENT` instances have `num_nodes = "0"` set explicitly. If it has been set, unset the field. ([#82](https://github.com/terraform-providers/terraform-provider-google-beta/issues/82)) -* cloudbuild: `google_cloudbuild_trigger.build.step.args` is now a list instead of space separated strings. ([#308](https://github.com/terraform-providers/terraform-provider-google-beta/issues/308)) -* cloudfunctions: `google_cloudfunctions_function.retry_on_failure` has been removed. Use `event_trigger.failure_policy.retry` instead. ([#75](https://github.com/terraform-providers/terraform-provider-google-beta/issues/75)) -* cloudfunctions: `google_cloudfunctions_function.trigger_bucket` and `google_cloudfunctions_function.trigger_topic` have been removed. Use `event_trigger` instead. ([#30](https://github.com/terraform-providers/terraform-provider-google-beta/issues/30)) -* composer: `google_composer_environment.node_config.zone` is now `Required`. ([#396](https://github.com/terraform-providers/terraform-provider-google-beta/issues/396)) -* compute: `google_compute_instance`, `google_compute_instance_from_template` `metadata` field is now authoritative and will remove values not explicitly set in config. [[#2208](https://github.com/terraform-providers/terraform-provider-google-beta/issues/2208)](https://github.com/terraform-providers/terraform-provider-google/pull/2208) -* compute: `google_compute_region_instance_group_manager` field `update_strategy` is now deprecated in the beta provider only. 
It will only function in the `google` provider, ([#76](https://github.com/terraform-providers/terraform-provider-google-beta/issues/76)) -* compute: `google_compute_global_forwarding_rule` field `labels` is now removed ([#81](https://github.com/terraform-providers/terraform-provider-google-beta/issues/81)) -* compute: `google_compute_project_metadata` resource is now authoritative and will remove values not explicitly set in config. [[#2205](https://github.com/terraform-providers/terraform-provider-google-beta/issues/2205)](https://github.com/terraform-providers/terraform-provider-google/pull/2205) -* compute: `google_compute_url_map` resource is now authoritative and will remove values not explicitly set in config. [[#2245](https://github.com/terraform-providers/terraform-provider-google-beta/issues/2245)](https://github.com/terraform-providers/terraform-provider-google/pull/2245) -* compute: `google_compute_snapshot.snapshot_encryption_key_raw`, `google_compute_snapshot.snapshot_encryption_key_sha256`, `google_compute_snapshot.source_disk_encryption_key_raw`, `google_compute_snapshot.source_disk_encryption_key_sha256` fields are now removed. Use `google_compute_snapshot.snapshot_encryption_key.0.raw_key`, `google_compute_snapshot.snapshot_encryption_key.0.sha256`, `google_compute_snapshot.source_disk_encryption_key.0.raw_key`, `google_compute_snapshot.source_disk_encryption_key.0.sha256` instead. ([#202](https://github.com/terraform-providers/terraform-provider-google-beta/issues/202)) -* compute: `google_compute_instance_group_manager` is no longer imported by the provider-level region. Set the appropriate provider-level zone instead. ([#248](https://github.com/terraform-providers/terraform-provider-google-beta/issues/248)) -* compute: `google_compute_region_instance_group_manager.update_strategy` in the `google-beta` provider has been removed. 
([#189](https://github.com/terraform-providers/terraform-provider-google-beta/issues/189)) -* compute: `google_compute_instance`, `google_compute_instance_template`, `google_compute_instance_from_template` have had the `network_interface.address` field removed. ([#190](https://github.com/terraform-providers/terraform-provider-google-beta/issues/190)) -* compute: `google_compute_instance` has had the `network_interface.access_config.assigned_nat_ip` field removed ([#48](https://github.com/terraform-providers/terraform-provider-google-beta/issues/48)) -* compute: `google_compute_disk` is no longer imported by the provider-level region. Set the appropriate provider-level zone instead. ([#249](https://github.com/terraform-providers/terraform-provider-google-beta/issues/249)) -* compute: `google_compute_router_nat.subnetwork.source_ip_ranges_to_nat` is now Required inside `subnetwork` blocks. ([#281](https://github.com/terraform-providers/terraform-provider-google-beta/issues/281)) -* compute: `google_compute_ssl_certificate`'s `private_key` field is no longer stored in state in cleartext; it is now SHA256 encoded. ([#400](https://github.com/terraform-providers/terraform-provider-google-beta/issues/400)) -* container: `google_container_cluster` fields (`private_cluster`, `master_ipv4_cidr_block`) are removed. Use `private_cluster_config` and `private_cluster_config.master_ipv4_cidr_block` instead. ([#78](https://github.com/terraform-providers/terraform-provider-google-beta/issues/78)) -* container: `google_container_node_pool`'s `name_prefix` field has been restored and is no longer deprecated. ([#2975](https://github.com/terraform-providers/terraform-provider-google-beta/issues/2975)) -* sql: `google_sql_database_instance` resource is now authoritative and will remove values not explicitly set in config. 
[[#2203](https://github.com/terraform-providers/terraform-provider-google-beta/issues/2203)](https://github.com/terraform-providers/terraform-provider-google/pull/2203) -* endpoints: `google_endpoints_service.protoc_output` was removed. Use `google_endpoints_service.protoc_output_base64` instead. ([#79](https://github.com/terraform-providers/terraform-provider-google-beta/issues/79)) -* resourcemanager: `google_project_iam_policy` is now authoritative and will remove values not explicitly set in config. Several fields were removed that made it authoritative: `authoritative`, `restore_policy`, and `disable_project`. This resource is very dangerous! Ensure you are not using the removed fields (`authoritative`, `restore_policy`, `disable_project`). ([#25](https://github.com/terraform-providers/terraform-provider-google-beta/issues/25)) -* resourcemanager: Datasource `google_service_account_key.service_account_id` has been removed. Use the `name` field instead. ([#80](https://github.com/terraform-providers/terraform-provider-google-beta/issues/80)) -* resourcemanager: `google_project.app_engine` has been removed. Use the `google_app_engine_application` resource instead. ([#74](https://github.com/terraform-providers/terraform-provider-google-beta/issues/74)) -* resourcemanager: `google_organization_custom_role.deleted` is now an output-only attribute. Use `terraform destroy`, or remove the resource from your config instead. ([#191](https://github.com/terraform-providers/terraform-provider-google-beta/issues/191)) -* resourcemanager: `google_project_custom_role.deleted` is now an output-only attribute. Use `terraform destroy`, or remove the resource from your config instead. ([#199](https://github.com/terraform-providers/terraform-provider-google-beta/issues/199)) -* serviceusage: `google_project_service` will now error instead of silently disabling dependent services if `disable_dependent_services` is unset. 
([#384](https://github.com/terraform-providers/terraform-provider-google-beta/issues/384)) -* storage: `google_storage_object_acl.role_entity` is now authoritative and will remove values not explicitly set in config. Use `google_storage_object_access_control` for fine-grained management. ([#26](https://github.com/terraform-providers/terraform-provider-google-beta/issues/26)) -* storage: `google_storage_default_object_acl.role_entity` is now authoritative and will remove values not explicitly set in config. ([#47](https://github.com/terraform-providers/terraform-provider-google-beta/issues/47)) -* iam: `google_*_iam_binding` Change all IAM bindings to be authoritative ([#291](https://github.com/terraform-providers/terraform-provider-google-beta/issues/291)) - -FEATURES: -* **New Resource**: `google_access_context_manager_access_policy` for managing the container for an organization's access levels. ([#96](https://github.com/terraform-providers/terraform-provider-google-beta/issues/96)) -* **New Resource**: `google_access_context_manager_access_level` for managing an organization's access levels. ([#149](https://github.com/terraform-providers/terraform-provider-google-beta/issues/149)) -* **New Resource**: `google_access_context_manager_service_perimeter` for managing service perimeters in an access policy. 
([#246](https://github.com/terraform-providers/terraform-provider-google-beta/issues/246)) -* **New Resource**: `google_app_engine_firewall_rule` ([#271](https://github.com/terraform-providers/terraform-provider-google-beta/issues/271)][[#336](https://github.com/terraform-providers/terraform-provider-google-beta/issues/336)) -* **New Resource**: `google_monitoring_group` ([#120](https://github.com/terraform-providers/terraform-provider-google-beta/issues/120)) -* **New Resource**: `google_project_iam_audit_config` ([#265](https://github.com/terraform-providers/terraform-provider-google-beta/issues/265)) -* **New Resource**: `google_storage_transfer_job` for managing recurring storage transfers with Google Cloud Storage. ([#256](https://github.com/terraform-providers/terraform-provider-google-beta/issues/256)) -* **New Resource**: `google_cloud_scheduler_job` for managing the cron job scheduling service with Google Cloud Scheduler. ([#378](https://github.com/terraform-providers/terraform-provider-google-beta/issues/378)) -* **New Datasource**: `google_storage_bucket_object` ([#223](https://github.com/terraform-providers/terraform-provider-google-beta/issues/223)) -* **New Datasource**: `google_storage_transfer_project_service_account` data source for retrieving the Storage Transfer service account for a project ([#247](https://github.com/terraform-providers/terraform-provider-google-beta/issues/247)) -* **New Datasource**: `google_kms_crypto_key` ([#359](https://github.com/terraform-providers/terraform-provider-google-beta/issues/359)) -* **New Datasource**: `google_kms_key_ring` ([#359](https://github.com/terraform-providers/terraform-provider-google-beta/issues/359)) - -ENHANCEMENTS: -* provider: Add `access_token` config option to allow Terraform to authenticate using short-lived Google OAuth 2.0 access token ([#330](https://github.com/terraform-providers/terraform-provider-google-beta/issues/330)) -* bigquery: Add new locations `europe-west2` and 
`australia-southeast1` to valid location set for `google_bigquery_dataset` ([#41](https://github.com/terraform-providers/terraform-provider-google-beta/issues/41)) -* bigquery: Add `default_partition_expiration_ms` field to `google_bigquery_dataset` resource. ([#127](https://github.com/terraform-providers/terraform-provider-google-beta/issues/127)) -* bigquery: Add `delete_contents_on_destroy` field to `google_bigquery_dataset` resource. ([#413](https://github.com/terraform-providers/terraform-provider-google-beta/issues/413)) -* bigquery: Add `time_partitioning.require_partition_filter` to `google_bigquery_table` resource. ([#324](https://github.com/terraform-providers/terraform-provider-google-beta/issues/324)) -* bigquery: Allow more BigQuery regions ([#269](https://github.com/terraform-providers/terraform-provider-google-beta/issues/269)) -* bigtable: Add `column_family` at create time to `google_bigtable_table`. [[#2228](https://github.com/terraform-providers/terraform-provider-google-beta/issues/2228)](https://github.com/terraform diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/GNUmakefile b/third_party/github.com/hashicorp/terraform-provider-google-beta/GNUmakefile index 8b7ca35957..eccac826ab 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/GNUmakefile +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/GNUmakefile @@ -5,13 +5,13 @@ DIR_NAME=google-beta default: build -build: fmtcheck generate +build: lint generate go install -test: fmtcheck generate +test: lint generate go test $(TESTARGS) -timeout=30s $(TEST) -testacc: fmtcheck +testacc: lint generate TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test $(TEST) -v $(TESTARGS) -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc" fmt: @@ -20,17 +20,12 @@ fmt: # Currently required by tf-deploy compile fmtcheck: - @echo "==> Checking source code against gofmt..." 
@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" -lint: - @echo "==> Checking source code against linters..." - @golangci-lint run ./$(DIR_NAME) +vet: + go vet -tools: - @echo "==> installing required tooling..." - go install github.com/client9/misspell/cmd/misspell - go install github.com/golangci/golangci-lint/cmd/golangci-lint +lint: fmtcheck vet generate: go generate ./... @@ -53,5 +48,5 @@ endif docscheck: @sh -c "'$(CURDIR)/scripts/docscheck.sh'" -.PHONY: build test testacc vet fmt fmtcheck lint tools errcheck test-compile website website-test docscheck generate +.PHONY: build test testacc fmt fmtcheck vet lint errcheck test-compile website website-test docscheck generate diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/go.mod b/third_party/github.com/hashicorp/terraform-provider-google-beta/go.mod index 586c870a3c..41adda30a7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/go.mod +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/go.mod @@ -1,29 +1,205 @@ module github.com/hashicorp/terraform-provider-google-beta +go 1.18 + require ( cloud.google.com/go/bigtable v1.13.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.11.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/client9/misspell v0.3.4 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 - github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 github.com/golangci/golangci-lint v1.40.1 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-version v1.4.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0 + github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/terraform-plugin-sdk/v2 
v2.18.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 github.com/sirupsen/logrus v1.8.1 - github.com/spf13/afero v1.2.2 // indirect - golang.org/x/mod v0.5.0 // indirect golang.org/x/net v0.0.0-20220526153639-5463443f8c37 golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 google.golang.org/api v0.82.0 - google.golang.org/grpc v1.46.2 + google.golang.org/grpc v1.47.0 ) -go 1.16 +require ( + 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a // indirect + bitbucket.org/creachadair/stringset v0.0.8 // indirect + cloud.google.com/go v0.100.2 // indirect + cloud.google.com/go/compute v1.6.1 // indirect + cloud.google.com/go/iam v0.1.1 // indirect + github.com/BurntSushi/toml v0.3.1 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/OpenPeeDeeP/depguard v1.0.1 // indirect + github.com/agext/levenshtein v1.2.2 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/ashanbrown/forbidigo v1.1.0 // indirect + github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/charithe/durationcheck v0.0.6 // indirect + github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b // indirect + github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect + github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect + github.com/daixiang0/gci v0.2.8 // indirect + github.com/denis-tingajkin/go-header v0.4.2 // indirect + github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/esimonov/ifshort v1.0.2 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/fzipp/gocyclo v0.3.1 // indirect + github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect + github.com/go-critic/go-critic v0.5.6 // indirect + github.com/go-toolsmith/astcast v1.0.0 // indirect + github.com/go-toolsmith/astcopy v1.0.0 // indirect + github.com/go-toolsmith/astequal v1.0.0 // indirect + github.com/go-toolsmith/astfmt v1.0.0 // indirect + github.com/go-toolsmith/astp v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 // indirect + github.com/go-toolsmith/typep v1.0.2 // indirect + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect + github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.3.5 // indirect + github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-cpy 
v0.0.0-20211218193943-a9c933c06932 // indirect + github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 // indirect + github.com/gostaticanalysis/analysisutil v0.4.1 // indirect + github.com/gostaticanalysis/comment v1.4.1 // indirect + github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/go-checkpoint v0.5.0 // indirect + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect + github.com/hashicorp/go-hclog v1.2.1 // indirect + github.com/hashicorp/go-plugin v1.4.4 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/hc-install v0.4.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/terraform-exec v0.17.2 // indirect + github.com/hashicorp/terraform-json v0.14.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.10.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.4.1 // indirect + github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect + github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jgautheron/goconst v1.4.0 // indirect + github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d // indirect + github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kulti/thelper v0.4.0 // indirect + github.com/kunwardeep/paralleltest v1.0.2 // indirect + 
github.com/kylelemons/godebug v1.1.0 // indirect + github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/ldez/gomoddirectives v0.2.1 // indirect + github.com/ldez/tagliatelle v0.2.0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/maratori/testpackage v1.0.1 // indirect + github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 // indirect + github.com/mgechev/revive v1.0.6 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moricho/tparallel v0.2.1 // indirect + github.com/nakabonne/nestif v0.3.0 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.1.0 // indirect + github.com/nishanths/predeclared v0.2.1 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375 // indirect + github.com/prometheus/client_golang v1.7.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.10.0 // indirect + github.com/prometheus/procfs v0.1.3 // indirect + 
github.com/quasilyte/go-ruleguard v0.3.4 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/ryancurrah/gomodguard v1.2.0 // indirect + github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect + github.com/sanposhiho/wastedassign v1.0.0 // indirect + github.com/securego/gosec/v2 v2.7.0 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.2.2 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/cobra v1.1.3 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.7.1 // indirect + github.com/ssgreg/nlreturn/v2 v2.1.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/testify v1.7.2 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b // indirect + github.com/tetafro/godot v1.4.6 // indirect + github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 // indirect + github.com/tomarrell/wrapcheck/v2 v2.1.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.3.2 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.4 // indirect + github.com/uudashr/gocognit v1.0.1 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect + github.com/vmihailenco/tagparser v0.1.1 // indirect + github.com/yeya24/promlinter v0.1.0 // indirect + github.com/zclconf/go-cty v1.10.0 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 // indirect + golang.org/x/mod v0.5.0 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + golang.org/x/text v0.3.7 // indirect + 
golang.org/x/tools v0.1.5 // indirect + golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/ini.v1 v1.51.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.1.4 // indirect + mvdan.cc/gofumpt v0.1.1 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 // indirect +) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/go.sum b/third_party/github.com/hashicorp/terraform-provider-google-beta/go.sum index c843eb06d4..6d59096b81 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/go.sum +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/go.sum @@ -73,8 +73,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.10.0 h1:zw07xH8x3MxhIG9dvof0FtIyr0iV3Wr7xCpGYMlgygU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.10.0/go.mod h1:UJoDYx6t3+xCOd+dZX8+NrEB+Y/eW1pQlvxh2Gt7y5E= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0 h1:+zbrl0sUHK+oav4Nhru21AjJLPwnmQCL01oZYzYiPac= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0/go.mod h1:i6Pmzp7aolLmJY86RaJ9wjqm/HFleMeN7Vl5uIWLwE8= 
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -219,8 +219,9 @@ github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -368,6 +369,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 h1:5/4TSDzpDnHQ8rKEEQBjRlYx77mHOvXu08oGchxej7o= +github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932/go.mod h1:cC6EdPbj/17GFCPDK39NRarlMI+kt+O60S12cNB5J9Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -450,15 +453,15 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= +github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= +github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog 
v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -467,38 +470,36 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= -github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hc-install v0.3.1/go.mod h1:3LCdWcCDS1gaHC9mhHCGbkYfoY6vdsKohGjugbZdZak= -github.com/hashicorp/hc-install v0.3.2 h1:oiQdJZvXmkNcRcEOOfM5n+VTsvNjWQeOjfAoO6dKSH8= -github.com/hashicorp/hc-install v0.3.2/go.mod h1:xMG6Tr8Fw1WFjlxH0A9v61cW15pFwgEGqEz0V4jisHs= +github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= +github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
-github.com/hashicorp/hcl/v2 v2.12.0 h1:PsYxySWpMD4KPaoJLnsHwtK5Qptvj/4Q6s0t4sUxZf4= -github.com/hashicorp/hcl/v2 v2.12.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform-exec v0.16.1 h1:NAwZFJW2L2SaCBVZoVaH8LPImLOGbPLkSHy0IYbs2uE= -github.com/hashicorp/terraform-exec v0.16.1/go.mod h1:aj0lVshy8l+MHhFNoijNHtqTJQI3Xlowv5EOsEaGO7M= -github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY= -github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk= -github.com/hashicorp/terraform-plugin-go v0.9.0 h1:FvLY/3z4SNVatPZdoFcyrlNbCar+WyyOTv5X4Tp+WZc= -github.com/hashicorp/terraform-plugin-go v0.9.0/go.mod h1:EawBkgjBWNf7jiKnVoyDyF39OSV+u6KUX+Y73EPj3oM= -github.com/hashicorp/terraform-plugin-log v0.3.0/go.mod h1:EjueSP/HjlyFAsDqt+okpCPjkT4NDynAe32AeDC4vps= -github.com/hashicorp/terraform-plugin-log v0.4.0 h1:F3eVnm8r2EfQCe2k9blPIiF/r2TT01SHijXnS7bujvc= -github.com/hashicorp/terraform-plugin-log v0.4.0/go.mod h1:9KclxdunFownr4pIm1jdmwKRmE4d6HVG2c9XDq47rpg= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0 h1:9fjPgCenJqnbjo95SDcbJ+YdLyEC1N35cwKWcRWhJTQ= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0/go.mod h1:hLa0sTiySU/AWEgV2GxJh0/pQIqcCmm30IPja9N9lTg= -github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 
h1:1FGtlkJw87UsTMg5s8jrekrHmUPUJaMcu6ELiVhQrNw= -github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896/go.mod h1:bzBPnUIkI0RxauU8Dqo+2KrZZ28Cf48s8V6IHt3p4co= +github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go= +github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8= +github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= +github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= +github.com/hashicorp/terraform-plugin-go v0.10.0 h1:FIQDt/AZDSOXnN+znBnLLZA9aFk4/GwL40rwMLnvuTk= +github.com/hashicorp/terraform-plugin-go v0.10.0/go.mod h1:aphXBG8qtQH0yF1waMRlaw/3G+ZFlR/6Artnvt1QEDE= +github.com/hashicorp/terraform-plugin-log v0.4.1 h1:xpbmVhvuU3mgHzLetOmx9pkOL2rmgpu302XxddON6eo= +github.com/hashicorp/terraform-plugin-log v0.4.1/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.18.0 h1:/cdI5di5XA+N80gXzXF4YcHq36DprBskubk6Z8i26ZQ= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.18.0/go.mod h1:L3SHkD/Q8zPVgXviQmpVwy9nKwpXXZscVIpVEnQ/T50= +github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= +github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -595,15 +596,18 @@ github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859 github.com/matryer/is v1.2.0/go.mod 
h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -654,7 +658,6 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= @@ -823,8 +826,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U= @@ -881,7 +885,6 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= 
-github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= @@ -914,7 +917,6 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -923,8 +925,9 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 
h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1015,6 +1018,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1070,7 +1074,6 @@ golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1130,6 +1133,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1140,6 +1144,7 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1438,8 +1443,9 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K 
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1487,8 +1493,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1511,7 +1518,3 @@ rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.10.3 h1:DsRf9bbXlHZVSWosvXwFLvtWdNTvIK/Ssbf8JWnrb/o= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.10.3/go.mod h1:UJoDYx6t3+xCOd+dZX8+NrEB+Y/eW1pQlvxh2Gt7y5E= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.11.0 h1:ZGyOhpvdhu9P7KFR3GtqresK5WyCX5bRP3AwCsRTjqw= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.11.0/go.mod h1:UJoDYx6t3+xCOd+dZX8+NrEB+Y/eW1pQlvxh2Gt7y5E= diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go index 424ad20c05..4c2fe03a07 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go index a0456a9900..bb2c772d58 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go index 2d2f7c3d70..b6f4c040cb 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go index f1bd1da9cb..5a05d876f3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go index d8157ca3c9..92324ba8e4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go index 8e71c2cc07..f8320d5ed9 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go index 53c35f08cb..e50b32bd6b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go index 7a896ecb47..176ca49aec 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go @@ -8,6 +8,7 @@ import ( "log" "net" "reflect" + "regexp" "strconv" "strings" "time" @@ -210,3 +211,29 @@ func compareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { // otherwise compare as self links return compareSelfLinkOrResourceName("", old, new, nil) } + +// Suppress diffs in below cases +// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" +// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" +func lastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if last := len(new) - 1; last >= 0 && new[last] == '/' { + new = new[:last] + } + + if last := len(old) - 1; last >= 0 && old[last] == '/' { + old = old[:last] + } + return new == old +} + +// Suppress diffs when the value read from api +// has the project number instead of the project name +func projectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + var a2, b2 string + reN := regexp.MustCompile("projects/\\d+") + re := regexp.MustCompile("projects/[^/]+") + replacement := []byte("projects/equal") + a2 = string(reN.ReplaceAll([]byte(old), replacement)) + b2 = string(re.ReplaceAll([]byte(new), replacement)) + return a2 == b2 +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress_test.go index 5634940907..493b9a0d2c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress_test.go @@ -284,3 +284,42 @@ func TestDurationDiffSuppress(t *testing.T) { } } } + +func TestLastSlashDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "slash to no slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app/", + New: "https://hello-rehvs75zla-uc.a.run.app", + ExpectDiffSuppress: true, + }, + "no slash to slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app", + New: "https://hello-rehvs75zla-uc.a.run.app/", + ExpectDiffSuppress: true, + }, + "slash to slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app/", + New: "https://hello-rehvs75zla-uc.a.run.app/", + ExpectDiffSuppress: true, + }, + "no slash to no slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app", + New: "https://hello-rehvs75zla-uc.a.run.app", + ExpectDiffSuppress: true, + }, + "different domains": { + Old: "https://x.a.run.app/", + New: "https://y.a.run.app", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if lastSlashDiffSuppress("uri", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go index fbe60029c1..cd92468ced 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go @@ -1,4 +1,3 @@ -// package google import ( @@ -122,15 +121,20 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.ProvisioningModel = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "ProvisioningModel") } + if v, ok := original["instance_termination_action"]; ok { + scheduling.InstanceTerminationAction = v.(string) + scheduling.ForceSendFields = append(scheduling.ForceSendFields, "InstanceTerminationAction") + } return scheduling, nil } func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { schedulingMap := map[string]interface{}{ - "on_host_maintenance": resp.OnHostMaintenance, - "preemptible": resp.Preemptible, - "min_node_cpus": resp.MinNodeCpus, - "provisioning_model": resp.ProvisioningModel, + "on_host_maintenance": resp.OnHostMaintenance, + "preemptible": resp.Preemptible, + "min_node_cpus": resp.MinNodeCpus, + "provisioning_model": resp.ProvisioningModel, + "instance_termination_action": resp.InstanceTerminationAction, } if resp.AutomaticRestart != nil { @@ -175,6 +179,7 @@ func flattenIpv6AccessConfigs(ipv6AccessConfigs []*compute.AccessConfig) []map[s "network_tier": ac.NetworkTier, } flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName + flattened[i]["external_ipv6"] = ac.ExternalIpv6 } return flattened } @@ -479,6 +484,10 @@ func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { return true } + if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { + return true + } + return false } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go index 90695db827..04c4cedbec 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go @@ -67,7 +67,7 @@ type Formatter struct { // Borrowed logic from https://github.com/sirupsen/logrus/blob/master/json_formatter.go and https://github.com/t-tomalak/logrus-easy-formatter/blob/master/formatter.go func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { // Suppress logs if TF_LOG is not DEBUG or TRACE - // also suppress frequent transport spam + // Also suppress frequent transport spam if !logging.IsDebugOrHigher() || strings.Contains(entry.Message, "transport is closing") { return nil, nil } @@ -137,6 +137,7 @@ func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { // Config is the configuration structure used to instantiate the Google // provider. type Config struct { + DCLConfig AccessToken string Credentials string ImpersonateServiceAccount string @@ -253,26 +254,12 @@ type Config struct { StorageTransferBasePath string BigtableAdminBasePath string + // dcl + ContainerAwsBasePath string + ContainerAzureBasePath string + requestBatcherServiceUsage *RequestBatcher requestBatcherIam *RequestBatcher - - // start DCLBasePaths - // dataprocBasePath is implemented in mm - AssuredWorkloadsBasePath string - ClouddeployBasePath string - CloudResourceManagerBasePath string - ContainerAwsBasePath string - ContainerAzureBasePath string - DataplexBasePath string - EventarcBasePath string - FirebaserulesBasePath string - GkeHubBasePath string - NetworkConnectivityBasePath string - OrgPolicyBasePath string - RecaptchaEnterpriseBasePath string - ApikeysBasePath string - // CloudBuild WorkerPool uses a different endpoint (v1beta1) than any other CloudBuild resources - CloudBuildWorkerPoolBasePath string } const AccessApprovalBasePathKey = "AccessApproval" @@ -364,7 +351,6 @@ const ResourceManagerV3BasePathKey = "ResourceManagerV3" const ServiceNetworkingBasePathKey = "ServiceNetworking" const StorageTransferBasePathKey = "StorageTransfer" const 
BigtableAdminBasePathKey = "BigtableAdmin" -const GkeHubFeatureBasePathKey = "GkeHubFeature" const ContainerAwsBasePathKey = "ContainerAws" const ContainerAzureBasePathKey = "ContainerAzure" @@ -372,7 +358,7 @@ const ContainerAzureBasePathKey = "ContainerAzure" var DefaultBasePaths = map[string]string{ AccessApprovalBasePathKey: "https://accessapproval.googleapis.com/v1/", AccessContextManagerBasePathKey: "https://accesscontextmanager.googleapis.com/v1/", - ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1/", + ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1beta1/", ApiGatewayBasePathKey: "https://apigateway.googleapis.com/v1beta/", ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", AppEngineBasePathKey: "https://appengine.googleapis.com/v1/", @@ -406,7 +392,7 @@ var DefaultBasePaths = map[string]string{ DialogflowBasePathKey: "https://dialogflow.googleapis.com/v2/", DialogflowCXBasePathKey: "https://{{location}}-dialogflow.googleapis.com/v3/", DNSBasePathKey: "https://dns.googleapis.com/dns/v1beta2/", - DocumentAIBasePathKey: "https://documentai.googleapis.com/v1/", + DocumentAIBasePathKey: "https://{{location}}-documentai.googleapis.com/v1/", EssentialContactsBasePathKey: "https://essentialcontacts.googleapis.com/v1/", FilestoreBasePathKey: "https://file.googleapis.com/v1beta1/", FirebaseBasePathKey: "https://firebase.googleapis.com/v1beta1/", @@ -459,7 +445,6 @@ var DefaultBasePaths = map[string]string{ ServiceNetworkingBasePathKey: "https://servicenetworking.googleapis.com/v1/", StorageTransferBasePathKey: "https://storagetransfer.googleapis.com/v1/", BigtableAdminBasePathKey: "https://bigtableadmin.googleapis.com/v2/", - GkeHubFeatureBasePathKey: "https://gkehub.googleapis.com/v1beta/", ContainerAwsBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", ContainerAzureBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", } diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config_test.go index 21dd0532d6..eeb75aa57a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/config_test.go @@ -2,7 +2,6 @@ package google import ( "context" - "fmt" "io/ioutil" "os" "testing" @@ -64,7 +63,7 @@ func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) { func TestAccConfigLoadValidate_credentials(t *testing.T) { if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } testAccPreCheck(t) @@ -92,7 +91,7 @@ func TestAccConfigLoadValidate_credentials(t *testing.T) { func TestAccConfigLoadValidate_impersonated(t *testing.T) { if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } testAccPreCheck(t) @@ -122,7 +121,7 @@ func TestAccConfigLoadValidate_impersonated(t *testing.T) { func TestAccConfigLoadValidate_accessTokenImpersonated(t *testing.T) { if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } testAccPreCheck(t) @@ -162,7 +161,7 @@ func TestAccConfigLoadValidate_accessTokenImpersonated(t *testing.T) { func TestAccConfigLoadValidate_accessToken(t *testing.T) { if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } testAccPreCheck(t) diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go index 9b9839740d..0df3958a7e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go index 40426a68e3..734fe2ac8f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go @@ -26,6 +26,12 @@ func dataSourceDnsManagedZone() *schema.Resource { Computed: true, }, + "managed_zone_id": { + Type: schema.TypeInt, + Computed: true, + Description: `Unique identifier for the resource; defined by the server.`, + }, + "name_servers": { Type: schema.TypeList, Computed: true, @@ -69,18 +75,21 @@ func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) erro return handleNotFoundError(err, d, fmt.Sprintf("dataSourceDnsManagedZone %q", name)) } - if err := d.Set("name_servers", zone.NameServers); err != nil { - return fmt.Errorf("Error setting name_servers: %s", err) + if err := d.Set("dns_name", zone.DnsName); err != nil { + return fmt.Errorf("Error setting dns_name: %s", err) } if err := d.Set("name", zone.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } - if err := d.Set("dns_name", zone.DnsName); err != nil { - return 
fmt.Errorf("Error setting dns_name: %s", err) - } if err := d.Set("description", zone.Description); err != nil { return fmt.Errorf("Error setting description: %s", err) } + if err := d.Set("managed_zone_id", zone.Id); err != nil { + return fmt.Errorf("Error setting managed_zone_id: %s", err) + } + if err := d.Set("name_servers", zone.NameServers); err != nil { + return fmt.Errorf("Error setting name_servers: %s", err) + } if err := d.Set("visibility", zone.Visibility); err != nil { return fmt.Errorf("Error setting visibility: %s", err) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone_test.go index acdbc17fab..e3b0a28f33 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone_test.go @@ -27,7 +27,6 @@ func TestAccDataSourceDnsManagedZone_basic(t *testing.T) { "forwarding_config.#": {}, "force_destroy": {}, "labels.#": {}, - "managed_zone_id": {}, "creation_time": {}, "reverse_lookup": {}, }, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_image_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_image_test.go index c0e760eaa4..b537380a1e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_image_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_image_test.go @@ -82,7 +82,7 @@ func testAccDataSourceCheckPublicImage() resource.TestCheckFunc { ds_attr := ds.Primary.Attributes attrs_to_test := map[string]string{ - "family": "debian-9", + "family": 
"debian-11", } for attr, expect_value := range attrs_to_test { @@ -96,7 +96,7 @@ func testAccDataSourceCheckPublicImage() resource.TestCheckFunc { } } - selfLink := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-9-stretch-v20171129" + selfLink := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-v20220719" if !compareSelfLinkOrResourceName("", ds_attr["self_link"], selfLink, nil) && ds_attr["self_link"] != selfLink { return fmt.Errorf("self link does not match: %s vs %s", ds_attr["self_link"], selfLink) @@ -109,7 +109,7 @@ func testAccDataSourceCheckPublicImage() resource.TestCheckFunc { var testAccDataSourcePublicImageConfig = ` data "google_compute_image" "debian" { project = "debian-cloud" - name = "debian-9-stretch-v20171129" + name = "debian-11-bullseye-v20220719" } ` diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_instance_group_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_instance_group_test.go index 2817963731..5c685a8e03 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_instance_group_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_instance_group_test.go @@ -198,7 +198,7 @@ func testAccCheckDataSourceGoogleComputeInstanceGroup(dataSourceName string) res func testAccCheckDataSourceGoogleComputeInstanceGroupConfig(instanceName, igName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -241,7 +241,7 @@ data "google_compute_instance_group" "test" { func testAccCheckDataSourceGoogleComputeInstanceGroupConfigWithNamedPort(instanceName, igName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" 
{ - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -294,7 +294,7 @@ data "google_compute_instance_group" "test" { func testAccCheckDataSourceGoogleComputeInstanceGroup_fromIGM(igmName, secondIgmName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_region_instance_group_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_region_instance_group_test.go index 75f1718739..647f1197d7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_region_instance_group_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_compute_region_instance_group_test.go @@ -35,7 +35,7 @@ resource "google_compute_target_pool" "foo" { data "google_compute_image" "debian" { project = "debian-cloud" - name = "debian-9-stretch-v20171129" + name = "debian-11-bullseye-v20220719" } resource "google_compute_instance_template" "foo" { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_container_cluster_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_container_cluster_test.go index 8fb9ee6302..ac9ec6030f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_container_cluster_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_container_cluster_test.go @@ -24,7 +24,6 @@ func TestAccContainerClusterDatasource_zonal(t *testing.T) { map[string]struct{}{ "enable_autopilot": {}, "enable_tpu": {}, - "enable_binary_authorization": {}, "pod_security_policy_config.#": {}, }, ), 
@@ -51,7 +50,6 @@ func TestAccContainerClusterDatasource_regional(t *testing.T) { map[string]struct{}{ "enable_autopilot": {}, "enable_tpu": {}, - "enable_binary_authorization": {}, "pod_security_policy_config.#": {}, }, ), diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go index cee2d595e7..79887ff109 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go @@ -16,14 +16,14 @@ import ( // to express a Google Cloud IAM policy in a data resource. This is an example // of how the schema would be used in a config: // -// data "google_iam_policy" "admin" { -// binding { -// role = "roles/storage.objectViewer" -// members = [ -// "user:evanbrown@google.com", -// ] -// } -// } +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } func dataSourceGoogleIamPolicy() *schema.Resource { return &schema.Resource{ Read: dataSourceGoogleIamPolicyRead, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go index ffc9cbde8e..49ad537f61 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -26,7 +27,7 @@ func datasourceGoogleProjectRead(d *schema.ResourceData, meta 
interface{}) error } else { project, err := getProject(d, config) if err != nil { - return err + return fmt.Errorf("no project value set. `project_id` must be set at the resource level, or a default `project` value must be specified on the provider") } d.SetId(fmt.Sprintf("projects/%s", project)) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt.go new file mode 100644 index 0000000000..a70f4d0687 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt.go @@ -0,0 +1,74 @@ +package google + +import ( + "fmt" + "strings" + + iamcredentials "google.golang.org/api/iamcredentials/v1" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceGoogleServiceAccountJwt() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountJwtRead, + Schema: map[string]*schema.Schema{ + "payload": { + Type: schema.TypeString, + Required: true, + Description: `A JSON-encoded JWT claims set that will be included in the signed JWT.`, + }, + "target_service_account": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRegexp("(" + strings.Join(PossibleServiceAccountNames, "|") + ")"), + }, + "delegates": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateRegexp(ServiceAccountLinkRegex), + }, + }, + "jwt": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleServiceAccountJwtRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + userAgent, err := generateUserAgentString(d, config.userAgent) + + if err != nil { + return err + } + + name := fmt.Sprintf("projects/-/serviceAccounts/%s", 
d.Get("target_service_account").(string)) + + jwtRequest := &iamcredentials.SignJwtRequest{ + Payload: d.Get("payload").(string), + Delegates: convertStringSet(d.Get("delegates").(*schema.Set)), + } + + service := config.NewIamCredentialsClient(userAgent) + + jwtResponse, err := service.Projects.ServiceAccounts.SignJwt(name, jwtRequest).Do() + + if err != nil { + return fmt.Errorf("error calling iamcredentials.SignJwt: %w", err) + } + + d.SetId(name) + + if err := d.Set("jwt", jwtResponse.SignedJwt); err != nil { + return fmt.Errorf("error setting jwt attribute: %w", err) + } + + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt_test.go new file mode 100644 index 0000000000..33b6b8a751 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt_test.go @@ -0,0 +1,120 @@ +package google + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "strings" + "testing" + + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const ( + jwtTestSubject = "custom-subject" + jwtTestFoo = "bar" + jwtTestComplexFooNested = "baz" +) + +type jwtTestPayload struct { + Subject string `json:"sub"` + + Foo string `json:"foo"` + + ComplexFoo struct { + Nested string `json:"nested"` + } `json:"complexFoo"` +} + +func testAccCheckServiceAccountJwtValue(name, audience string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + + rs, ok := ms.Resources[name] + + if !ok { + return fmt.Errorf("can't find %s in state", name) + } + + jwtString, ok := rs.Primary.Attributes["jwt"] + + if !ok { + return fmt.Errorf("jwt not found") + } + + jwtParts := 
strings.Split(jwtString, ".") + + if len(jwtParts) != 3 { + return errors.New("jwt does not appear well-formed") + } + + decoded, err := base64.RawURLEncoding.DecodeString(jwtParts[1]) + + if err != nil { + return fmt.Errorf("could not base64 decode jwt body: %w", err) + } + + var payload jwtTestPayload + + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&payload) + + if err != nil { + return fmt.Errorf("could not decode jwt payload: %w", err) + } + + if payload.Subject != jwtTestSubject { + return fmt.Errorf("invalid 'sub', expected '%s', got '%s'", jwtTestSubject, payload.Subject) + } + + if payload.Foo != jwtTestFoo { + return fmt.Errorf("invalid 'foo', expected '%s', got '%s'", jwtTestFoo, payload.Foo) + } + + if payload.ComplexFoo.Nested != jwtTestComplexFooNested { + return fmt.Errorf("invalid 'foo', expected '%s', got '%s'", jwtTestComplexFooNested, payload.ComplexFoo.Nested) + } + + return nil + } +} + +func TestAccDataSourceGoogleServiceAccountJwt(t *testing.T) { + t.Parallel() + + resourceName := "data.google_service_account_jwt.default" + serviceAccount := getTestServiceAccountFromEnv(t) + targetServiceAccountEmail := BootstrapServiceAccount(t, getTestProjectFromEnv(), serviceAccount) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleServiceAccountJwt(targetServiceAccountEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceAccountJwtValue(resourceName, targetAudience), + ), + }, + }, + }) +} + +func testAccCheckGoogleServiceAccountJwt(targetServiceAccount string) string { + return fmt.Sprintf(` +data "google_service_account_jwt" "default" { + target_service_account = "%s" + + payload = jsonencode({ + sub: "%s", + foo: "%s", + complexFoo: { + nested: "%s" + } + }) +} +`, targetServiceAccount, jwtTestSubject, jwtTestFoo, jwtTestComplexFooNested) +} diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go index ec7135732a..d6d3f530f4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go @@ -29,6 +29,12 @@ func dataSourceSqlBackupRun() *schema.Resource { Computed: true, Description: `Location of the backups.`, }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Project ID of the project that contains the instance.`, + }, "start_time": { Type: schema.TypeString, Computed: true, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go index 938c72f8f3..445e1af8fe 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go @@ -14,12 +14,11 @@ import ( "log" "net/url" "os" + "sort" "strconv" "strings" "time" - "sort" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -170,10 +169,9 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err // loadJwtConfig looks for credentials json in the following places, // in order of preference: -// 1. `credentials` attribute of the datasource -// 2. `credentials` attribute in the provider definition. -// 3. 
A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 1. `credentials` attribute of the datasource +// 2. `credentials` attribute in the provider definition. +// 3. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable. func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { config := meta.(*Config) @@ -250,7 +248,6 @@ type UrlData struct { // ------------------- // GET // -// // 1388534400 // bucket/objectname // ------------------- diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go index e1bdc4cc48..4f5b190a54 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go index b07661e853..67a7f53c47 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go index de972457f7..b6a8eb6e19 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go index 6b3983e171..abb26b2816 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go @@ -97,7 +97,7 @@ func isConnectionResetNetworkError(err error) (bool, string) { // Retry 409s because some APIs like Cloud SQL throw a 409 if concurrent calls // are being made. // -//The only way right now to determine it is a retryable 409 due to +// The only way right now to determine it is a retryable 409 due to // concurrent calls is to look at the contents of the error message. 
// See https://github.com/hashicorp/terraform-provider-google/issues/3279 func is409OperationInProgressError(err error) (bool, string) { @@ -419,3 +419,14 @@ func isBigTableRetryableError(err error) (bool, string) { return false, "" } + +// Concurrent Apigee operations can fail with a 400 error +func isApigeeRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(strings.ToLower(gerr.Body), "the resource is locked by another operation") { + return true, "Waiting for other concurrent operations to finish" + } + } + + return false, "" +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go index 4ede494366..4131fe8686 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go index 69e45d6d24..c6b778a6de 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go index 8a576ff58d..4f1fab3745 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go index 629f895574..4b7b600c52 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go index 61f17bce60..924b86a764 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go index 2cf4375214..e2ffa59532 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go index 51bf6e405d..430f6c2598 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go index fd5a51ca48..faa8d484d2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go index dd99d31ada..2a394e8026 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go index 8b307405f2..748ccb58a2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go index 7dbe271c0f..e81592fb69 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go index 11abc48702..3b72b4bc2a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository_generated_test.go index 7676954f98..d99639269a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository_generated_test.go @@ -15,6 +15,7 @@ package google import ( + "fmt" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -25,20 +26,32 @@ func TestAccArtifactRegistryRepositoryIamBindingGenerated(t *testing.T) { context := map[string]interface{}{ "random_suffix": randString(t, 10), - "role": "roles/viewer", + "role": "roles/artifactregistry.reader", } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, Steps: []resource.TestStep{ { Config: testAccArtifactRegistryRepositoryIamBinding_basicGenerated(context), }, + { + ResourceName: 
"google_artifact_registry_repository_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/repositories/%s roles/artifactregistry.reader", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, { // Test Iam Binding update Config: testAccArtifactRegistryRepositoryIamBinding_updateGenerated(context), }, + { + ResourceName: "google_artifact_registry_repository_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/repositories/%s roles/artifactregistry.reader", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -48,17 +61,23 @@ func TestAccArtifactRegistryRepositoryIamMemberGenerated(t *testing.T) { context := map[string]interface{}{ "random_suffix": randString(t, 10), - "role": "roles/viewer", + "role": "roles/artifactregistry.reader", } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, Steps: []resource.TestStep{ { // Test Iam Member creation (no update for member, no need to test) Config: testAccArtifactRegistryRepositoryIamMember_basicGenerated(context), }, + { + ResourceName: "google_artifact_registry_repository_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/repositories/%s roles/artifactregistry.reader user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -68,19 +87,31 @@ func TestAccArtifactRegistryRepositoryIamPolicyGenerated(t *testing.T) { context := map[string]interface{}{ "random_suffix": randString(t, 10), - "role": "roles/viewer", + "role": "roles/artifactregistry.reader", } vcrTest(t, resource.TestCase{ PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, Steps: []resource.TestStep{ { Config: testAccArtifactRegistryRepositoryIamPolicy_basicGenerated(context), }, + { + ResourceName: "google_artifact_registry_repository_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/repositories/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccArtifactRegistryRepositoryIamPolicy_emptyBinding(context), }, + { + ResourceName: "google_artifact_registry_repository_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/repositories/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -88,16 +119,13 @@ func TestAccArtifactRegistryRepositoryIamPolicyGenerated(t *testing.T) { func testAccArtifactRegistryRepositoryIamMember_basicGenerated(context map[string]interface{}) string { return Nprintf(` resource "google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository%{random_suffix}" - format = "DOCKER" + description = "example docker repository%{random_suffix}" + format = "DOCKER" } resource "google_artifact_registry_repository_iam_member" "foo" { - provider = google-beta project = google_artifact_registry_repository.my-repo.project location = google_artifact_registry_repository.my-repo.location repository = google_artifact_registry_repository.my-repo.name @@ -110,16 +138,13 @@ resource "google_artifact_registry_repository_iam_member" "foo" { func testAccArtifactRegistryRepositoryIamPolicy_basicGenerated(context map[string]interface{}) string { return Nprintf(` resource 
"google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository%{random_suffix}" - format = "DOCKER" + description = "example docker repository%{random_suffix}" + format = "DOCKER" } data "google_iam_policy" "foo" { - provider = google-beta binding { role = "%{role}" members = ["user:admin@hashicorptest.com"] @@ -127,7 +152,6 @@ data "google_iam_policy" "foo" { } resource "google_artifact_registry_repository_iam_policy" "foo" { - provider = google-beta project = google_artifact_registry_repository.my-repo.project location = google_artifact_registry_repository.my-repo.location repository = google_artifact_registry_repository.my-repo.name @@ -139,20 +163,16 @@ resource "google_artifact_registry_repository_iam_policy" "foo" { func testAccArtifactRegistryRepositoryIamPolicy_emptyBinding(context map[string]interface{}) string { return Nprintf(` resource "google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository%{random_suffix}" - format = "DOCKER" + description = "example docker repository%{random_suffix}" + format = "DOCKER" } data "google_iam_policy" "foo" { - provider = google-beta } resource "google_artifact_registry_repository_iam_policy" "foo" { - provider = google-beta project = google_artifact_registry_repository.my-repo.project location = google_artifact_registry_repository.my-repo.location repository = google_artifact_registry_repository.my-repo.name @@ -164,16 +184,13 @@ resource "google_artifact_registry_repository_iam_policy" "foo" { func testAccArtifactRegistryRepositoryIamBinding_basicGenerated(context map[string]interface{}) string { return Nprintf(` resource "google_artifact_registry_repository" "my-repo" { - provider = 
google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository%{random_suffix}" - format = "DOCKER" + description = "example docker repository%{random_suffix}" + format = "DOCKER" } resource "google_artifact_registry_repository_iam_binding" "foo" { - provider = google-beta project = google_artifact_registry_repository.my-repo.project location = google_artifact_registry_repository.my-repo.location repository = google_artifact_registry_repository.my-repo.name @@ -186,16 +203,13 @@ resource "google_artifact_registry_repository_iam_binding" "foo" { func testAccArtifactRegistryRepositoryIamBinding_updateGenerated(context map[string]interface{}) string { return Nprintf(` resource "google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository%{random_suffix}" - format = "DOCKER" + description = "example docker repository%{random_suffix}" + format = "DOCKER" } resource "google_artifact_registry_repository_iam_binding" "foo" { - provider = google-beta project = google_artifact_registry_repository.my-repo.project location = google_artifact_registry_repository.my-repo.location repository = google_artifact_registry_repository.my-repo.name diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go index 36a6a2228a..c6d2c8e252 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection.go new file mode 100644 index 0000000000..ce4ba86903 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var BigqueryConnectionConnectionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "connection_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type BigqueryConnectionConnectionIamUpdater struct { + project string + location string + connectionId string + d TerraformResourceData + Config *Config +} + +func BigqueryConnectionConnectionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := 
getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("connection_id"); ok { + values["connection_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("connection_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryConnectionConnectionIamUpdater{ + project: values["project"], + location: values["location"], + connectionId: values["connection_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("connection_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting connection_id: %s", err) + } + + return u, nil +} + +func BigqueryConnectionConnectionIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, 
d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryConnectionConnectionIamUpdater{ + project: values["project"], + location: values["location"], + connectionId: values["connection_id"], + d: d, + Config: config, + } + if err := d.Set("connection_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting connection_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyConnectionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyConnectionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return 
errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) qualifyConnectionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BigqueryConnectionBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId) +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigqueryconnection-connection-%s", u.GetResourceId()) +} + +func (u *BigqueryConnectionConnectionIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigqueryconnection connection %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection_generated_test.go new file mode 100644 index 0000000000..8de0aee042 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection_generated_test.go @@ -0,0 +1,237 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccBigqueryConnectionConnectionIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccBigqueryConnectionConnectionIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_bigquery_connection_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/connections/%s roles/viewer", getTestProjectFromEnv(), "US", fmt.Sprintf("tf-test-my-connection%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccBigqueryConnectionConnectionIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_bigquery_connection_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/connections/%s roles/viewer", getTestProjectFromEnv(), "US", fmt.Sprintf("tf-test-my-connection%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBigqueryConnectionConnectionIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: 
testAccBigqueryConnectionConnectionIamMember_basicGenerated(context), + }, + { + ResourceName: "google_bigquery_connection_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/connections/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), "US", fmt.Sprintf("tf-test-my-connection%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBigqueryConnectionConnectionIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccBigqueryConnectionConnectionIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_bigquery_connection_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/connections/%s", getTestProjectFromEnv(), "US", fmt.Sprintf("tf-test-my-connection%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBigqueryConnectionConnectionIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_bigquery_connection_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/connections/%s", getTestProjectFromEnv(), "US", fmt.Sprintf("tf-test-my-connection%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccBigqueryConnectionConnectionIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_bigquery_connection" "connection" { + connection_id = "tf-test-my-connection%{random_suffix}" + location = "US" + friendly_name = "👋" + description = "a riveting description" + cloud_resource {} +} + +resource 
"google_bigquery_connection_iam_member" "foo" { + project = google_bigquery_connection.connection.project + location = google_bigquery_connection.connection.location + connection_id = google_bigquery_connection.connection.connection_id + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccBigqueryConnectionConnectionIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_bigquery_connection" "connection" { + connection_id = "tf-test-my-connection%{random_suffix}" + location = "US" + friendly_name = "👋" + description = "a riveting description" + cloud_resource {} +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_bigquery_connection_iam_policy" "foo" { + project = google_bigquery_connection.connection.project + location = google_bigquery_connection.connection.location + connection_id = google_bigquery_connection.connection.connection_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccBigqueryConnectionConnectionIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_bigquery_connection" "connection" { + connection_id = "tf-test-my-connection%{random_suffix}" + location = "US" + friendly_name = "👋" + description = "a riveting description" + cloud_resource {} +} + +data "google_iam_policy" "foo" { +} + +resource "google_bigquery_connection_iam_policy" "foo" { + project = google_bigquery_connection.connection.project + location = google_bigquery_connection.connection.location + connection_id = google_bigquery_connection.connection.connection_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccBigqueryConnectionConnectionIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_bigquery_connection" "connection" { + 
connection_id = "tf-test-my-connection%{random_suffix}" + location = "US" + friendly_name = "👋" + description = "a riveting description" + cloud_resource {} +} + +resource "google_bigquery_connection_iam_binding" "foo" { + project = google_bigquery_connection.connection.project + location = google_bigquery_connection.connection.location + connection_id = google_bigquery_connection.connection.connection_id + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccBigqueryConnectionConnectionIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_bigquery_connection" "connection" { + connection_id = "tf-test-my-connection%{random_suffix}" + location = "US" + friendly_name = "👋" + description = "a riveting description" + cloud_resource {} +} + +resource "google_bigquery_connection_iam_binding" "foo" { + project = google_bigquery_connection.connection.project + location = google_bigquery_connection.connection.location + connection_id = google_bigquery_connection.connection.connection_id + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go index eababb290d..0f176a39c5 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go index 00bb8b79c8..1f69534471 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go index 49f592e93b..26d770bf3d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue.go new file mode 100644 index 0000000000..651b8a9a1e --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var CloudTasksQueueIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type CloudTasksQueueIamUpdater struct { + project string + location string + name string + d TerraformResourceData + Config *Config +} + +func CloudTasksQueueIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := 
d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudTasksQueueIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudTasksQueueIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudTasksQueueIamUpdater{ + project: values["project"], 
+ location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudTasksQueueIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyQueueUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudTasksQueueIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyQueueUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudTasksQueueIamUpdater) qualifyQueueUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{CloudTasksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudTasksQueueIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name) +} + +func (u *CloudTasksQueueIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudtasks-queue-%s", u.GetResourceId()) +} + +func (u *CloudTasksQueueIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudtasks queue %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue_generated_test.go new file mode 100644 index 0000000000..aca5b8cee4 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue_generated_test.go @@ -0,0 +1,210 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccCloudTasksQueueIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCloudTasksQueueIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_cloud_tasks_queue_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/queues/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloud-tasks-queue-test%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccCloudTasksQueueIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_cloud_tasks_queue_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/queues/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloud-tasks-queue-test%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccCloudTasksQueueIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccCloudTasksQueueIamMember_basicGenerated(context), + }, + { + ResourceName: "google_cloud_tasks_queue_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/queues/%s roles/viewer 
user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloud-tasks-queue-test%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccCloudTasksQueueIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCloudTasksQueueIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_cloud_tasks_queue_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/queues/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloud-tasks-queue-test%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCloudTasksQueueIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_cloud_tasks_queue_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/queues/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloud-tasks-queue-test%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudTasksQueueIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "tf-test-cloud-tasks-queue-test%{random_suffix}" + location = "us-central1" +} + +resource "google_cloud_tasks_queue_iam_member" "foo" { + project = google_cloud_tasks_queue.default.project + location = google_cloud_tasks_queue.default.location + name = google_cloud_tasks_queue.default.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccCloudTasksQueueIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource 
"google_cloud_tasks_queue" "default" { + name = "tf-test-cloud-tasks-queue-test%{random_suffix}" + location = "us-central1" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_cloud_tasks_queue_iam_policy" "foo" { + project = google_cloud_tasks_queue.default.project + location = google_cloud_tasks_queue.default.location + name = google_cloud_tasks_queue.default.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccCloudTasksQueueIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "tf-test-cloud-tasks-queue-test%{random_suffix}" + location = "us-central1" +} + +data "google_iam_policy" "foo" { +} + +resource "google_cloud_tasks_queue_iam_policy" "foo" { + project = google_cloud_tasks_queue.default.project + location = google_cloud_tasks_queue.default.location + name = google_cloud_tasks_queue.default.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccCloudTasksQueueIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "tf-test-cloud-tasks-queue-test%{random_suffix}" + location = "us-central1" +} + +resource "google_cloud_tasks_queue_iam_binding" "foo" { + project = google_cloud_tasks_queue.default.project + location = google_cloud_tasks_queue.default.location + name = google_cloud_tasks_queue.default.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccCloudTasksQueueIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "tf-test-cloud-tasks-queue-test%{random_suffix}" + location = "us-central1" +} + +resource "google_cloud_tasks_queue_iam_binding" "foo" { + project = 
google_cloud_tasks_queue.default.project + location = google_cloud_tasks_queue.default.location + name = google_cloud_tasks_queue.default.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go index e607f6060b..652c57cc52 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function_generated_test.go index 066bdd8324..b2753df9db 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function_generated_test.go @@ -15,6 +15,7 @@ package google import ( + "fmt" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -24,24 +25,37 @@ func TestAccCloudfunctions2functionIamBindingGenerated(t *testing.T) { t.Parallel() context := map[string]interface{}{ - "random_suffix": randString(t, 10), - "role": "roles/viewer", - "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", - "primary_resource_id": "terraform-test2", - "location": "us-central1", + "random_suffix": randString(t, 10), + "role": "roles/viewer", + 
"project": getTestProjectFromEnv(), + + "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", + "location": "us-central1", } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, Steps: []resource.TestStep{ { Config: testAccCloudfunctions2functionIamBinding_basicGenerated(context), }, + { + ResourceName: "google_cloudfunctions2_function_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/functions/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-function-v2%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, { // Test Iam Binding update Config: testAccCloudfunctions2functionIamBinding_updateGenerated(context), }, + { + ResourceName: "google_cloudfunctions2_function_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/functions/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-function-v2%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -50,21 +64,28 @@ func TestAccCloudfunctions2functionIamMemberGenerated(t *testing.T) { t.Parallel() context := map[string]interface{}{ - "random_suffix": randString(t, 10), - "role": "roles/viewer", - "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", - "primary_resource_id": "terraform-test2", - "location": "us-central1", + "random_suffix": randString(t, 10), + "role": "roles/viewer", + "project": getTestProjectFromEnv(), + + "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", + "location": "us-central1", } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, Steps: []resource.TestStep{ { // Test Iam Member creation (no update for member, no need to test) Config: testAccCloudfunctions2functionIamMember_basicGenerated(context), }, + 
{ + ResourceName: "google_cloudfunctions2_function_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/functions/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-function-v2%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -73,52 +94,67 @@ func TestAccCloudfunctions2functionIamPolicyGenerated(t *testing.T) { t.Parallel() context := map[string]interface{}{ - "random_suffix": randString(t, 10), - "role": "roles/viewer", - "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", - "primary_resource_id": "terraform-test2", - "location": "us-central1", + "random_suffix": randString(t, 10), + "role": "roles/viewer", + "project": getTestProjectFromEnv(), + + "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", + "location": "us-central1", } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, Steps: []resource.TestStep{ { Config: testAccCloudfunctions2functionIamPolicy_basicGenerated(context), }, + { + ResourceName: "google_cloudfunctions2_function_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/functions/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-function-v2%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccCloudfunctions2functionIamPolicy_emptyBinding(context), }, + { + ResourceName: "google_cloudfunctions2_function_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/functions/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-function-v2%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, }, }) } func testAccCloudfunctions2functionIamMember_basicGenerated(context map[string]interface{}) string { return Nprintf(` +# [START functions_v2_basic] +locals 
{ + project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test2" { - provider = google-beta - name = "tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "tf-test-function-v2%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloHttp" # Set the entry point source { storage_source { bucket = google_storage_bucket.bucket.name @@ -134,10 +170,15 @@ resource "google_cloudfunctions2_function" "terraform-test2" { } } +output "function_uri" { + value = google_cloudfunctions2_function.function.service_config[0].uri +} +# [END functions_v2_basic] + resource "google_cloudfunctions2_function_iam_member" "foo" { - provider = google-beta - cloud_function = google_cloudfunctions2_function.%{primary_resource_id}.name - location = "%{location}" + project = google_cloudfunctions2_function.function.project + location = google_cloudfunctions2_function.function.location + cloud_function = google_cloudfunctions2_function.function.name role = "%{role}" member = "user:admin@hashicorptest.com" } @@ -146,29 +187,31 @@ resource "google_cloudfunctions2_function_iam_member" "foo" { func testAccCloudfunctions2functionIamPolicy_basicGenerated(context map[string]interface{}) string { return Nprintf(` +# [START functions_v2_basic] +locals { + 
project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test2" { - provider = google-beta - name = "tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "tf-test-function-v2%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloHttp" # Set the entry point source { storage_source { bucket = google_storage_bucket.bucket.name @@ -184,8 +227,12 @@ resource "google_cloudfunctions2_function" "terraform-test2" { } } +output "function_uri" { + value = google_cloudfunctions2_function.function.service_config[0].uri +} +# [END functions_v2_basic] + data "google_iam_policy" "foo" { - provider = google-beta binding { role = "%{role}" members = ["user:admin@hashicorptest.com"] @@ -193,9 +240,9 @@ data "google_iam_policy" "foo" { } resource "google_cloudfunctions2_function_iam_policy" "foo" { - provider = google-beta - cloud_function = google_cloudfunctions2_function.%{primary_resource_id}.name - location = "%{location}" + project = google_cloudfunctions2_function.function.project + location = google_cloudfunctions2_function.function.location + cloud_function = google_cloudfunctions2_function.function.name policy_data = data.google_iam_policy.foo.policy_data } `, context) @@ -203,29 +250,31 @@ resource 
"google_cloudfunctions2_function_iam_policy" "foo" { func testAccCloudfunctions2functionIamPolicy_emptyBinding(context map[string]interface{}) string { return Nprintf(` +# [START functions_v2_basic] +locals { + project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test2" { - provider = google-beta - name = "tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "tf-test-function-v2%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloHttp" # Set the entry point source { storage_source { bucket = google_storage_bucket.bucket.name @@ -241,14 +290,18 @@ resource "google_cloudfunctions2_function" "terraform-test2" { } } +output "function_uri" { + value = google_cloudfunctions2_function.function.service_config[0].uri +} +# [END functions_v2_basic] + data "google_iam_policy" "foo" { - provider = google-beta } resource "google_cloudfunctions2_function_iam_policy" "foo" { - provider = google-beta - cloud_function = google_cloudfunctions2_function.%{primary_resource_id}.name - location = "%{location}" + project = google_cloudfunctions2_function.function.project + location = google_cloudfunctions2_function.function.location + cloud_function = google_cloudfunctions2_function.function.name policy_data = 
data.google_iam_policy.foo.policy_data } `, context) @@ -256,29 +309,31 @@ resource "google_cloudfunctions2_function_iam_policy" "foo" { func testAccCloudfunctions2functionIamBinding_basicGenerated(context map[string]interface{}) string { return Nprintf(` +# [START functions_v2_basic] +locals { + project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test2" { - provider = google-beta - name = "tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "tf-test-function-v2%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloHttp" # Set the entry point source { storage_source { bucket = google_storage_bucket.bucket.name @@ -294,10 +349,15 @@ resource "google_cloudfunctions2_function" "terraform-test2" { } } +output "function_uri" { + value = google_cloudfunctions2_function.function.service_config[0].uri +} +# [END functions_v2_basic] + resource "google_cloudfunctions2_function_iam_binding" "foo" { - provider = google-beta - cloud_function = google_cloudfunctions2_function.%{primary_resource_id}.name - location = "%{location}" + project = google_cloudfunctions2_function.function.project + location = google_cloudfunctions2_function.function.location + cloud_function = google_cloudfunctions2_function.function.name role = 
"%{role}" members = ["user:admin@hashicorptest.com"] } @@ -306,29 +366,31 @@ resource "google_cloudfunctions2_function_iam_binding" "foo" { func testAccCloudfunctions2functionIamBinding_updateGenerated(context map[string]interface{}) string { return Nprintf(` +# [START functions_v2_basic] +locals { + project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test2" { - provider = google-beta - name = "tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "tf-test-function-v2%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloHttp" # Set the entry point source { storage_source { bucket = google_storage_bucket.bucket.name @@ -344,10 +406,15 @@ resource "google_cloudfunctions2_function" "terraform-test2" { } } +output "function_uri" { + value = google_cloudfunctions2_function.function.service_config[0].uri +} +# [END functions_v2_basic] + resource "google_cloudfunctions2_function_iam_binding" "foo" { - provider = google-beta - cloud_function = google_cloudfunctions2_function.%{primary_resource_id}.name - location = "%{location}" + project = google_cloudfunctions2_function.function.project + location = google_cloudfunctions2_function.function.location + cloud_function = google_cloudfunctions2_function.function.name role = 
"%{role}" members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go index 1cc5c3d5fa..0f7c69361b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry.go new file mode 100644 index 0000000000..7e980839ae --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var CloudIotDeviceRegistryIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type CloudIotDeviceRegistryIamUpdater struct { + project string + region string + name string + d TerraformResourceData + Config *Config +} + +func CloudIotDeviceRegistryIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := getRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudIotDeviceRegistryIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: 
config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudIotDeviceRegistryIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := getRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudIotDeviceRegistryIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDeviceRegistryUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = 
Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDeviceRegistryUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) qualifyDeviceRegistryUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudIotBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name) +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudiot-deviceregistry-%s", u.GetResourceId()) +} + +func (u *CloudIotDeviceRegistryIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudiot deviceregistry %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry_generated_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry_generated_test.go new file mode 100644 index 0000000000..1bbea783a1 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry_generated_test.go @@ -0,0 +1,211 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccCloudIotDeviceRegistryIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + "project": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCloudIotDeviceRegistryIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_cloudiot_registry_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/registries/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloudiot-registry%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccCloudIotDeviceRegistryIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_cloudiot_registry_iam_binding.foo", + ImportStateId: 
fmt.Sprintf("projects/%s/locations/%s/registries/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloudiot-registry%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccCloudIotDeviceRegistryIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + "project": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccCloudIotDeviceRegistryIamMember_basicGenerated(context), + }, + { + ResourceName: "google_cloudiot_registry_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/registries/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloudiot-registry%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccCloudIotDeviceRegistryIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + "project": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCloudIotDeviceRegistryIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_cloudiot_registry_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/registries/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloudiot-registry%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccCloudIotDeviceRegistryIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_cloudiot_registry_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/registries/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-cloudiot-registry%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudIotDeviceRegistryIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloudiot_registry" "test-registry" { + name = "tf-test-cloudiot-registry%{random_suffix}" +} + +resource "google_cloudiot_registry_iam_member" "foo" { + project = google_cloudiot_registry.test-registry.project + region = google_cloudiot_registry.test-registry.region + name = google_cloudiot_registry.test-registry.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccCloudIotDeviceRegistryIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloudiot_registry" "test-registry" { + name = "tf-test-cloudiot-registry%{random_suffix}" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_cloudiot_registry_iam_policy" "foo" { + project = google_cloudiot_registry.test-registry.project + region = google_cloudiot_registry.test-registry.region + name = google_cloudiot_registry.test-registry.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccCloudIotDeviceRegistryIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloudiot_registry" "test-registry" { + name = "tf-test-cloudiot-registry%{random_suffix}" +} + +data "google_iam_policy" "foo" { +} + +resource "google_cloudiot_registry_iam_policy" "foo" { + project = google_cloudiot_registry.test-registry.project + region = 
google_cloudiot_registry.test-registry.region + name = google_cloudiot_registry.test-registry.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccCloudIotDeviceRegistryIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloudiot_registry" "test-registry" { + name = "tf-test-cloudiot-registry%{random_suffix}" +} + +resource "google_cloudiot_registry_iam_binding" "foo" { + project = google_cloudiot_registry.test-registry.project + region = google_cloudiot_registry.test-registry.region + name = google_cloudiot_registry.test-registry.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccCloudIotDeviceRegistryIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloudiot_registry" "test-registry" { + name = "tf-test-cloudiot-registry%{random_suffix}" +} + +resource "google_cloudiot_registry_iam_binding" "foo" { + project = google_cloudiot_registry.test-registry.project + region = google_cloudiot_registry.test-registry.region + name = google_cloudiot_registry.test-registry.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket.go new file mode 100644 index 0000000000..ae60dbdb42 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket.go @@ -0,0 +1,199 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules 
and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var ComputeBackendBucketIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type ComputeBackendBucketIamUpdater struct { + project string + name string + d TerraformResourceData + Config *Config +} + +func ComputeBackendBucketIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeBackendBucketIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", 
err) + } + + return u, nil +} + +func ComputeBackendBucketIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeBackendBucketIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeBackendBucketIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyBackendBucketUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeBackendBucketIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyBackendBucketUrl("setIamPolicy") + if err != nil { + return err + } + project, err := 
getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeBackendBucketIamUpdater) qualifyBackendBucketUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/backendBuckets/%s", u.project, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeBackendBucketIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/global/backendBuckets/%s", u.project, u.name) +} + +func (u *ComputeBackendBucketIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-backendbucket-%s", u.GetResourceId()) +} + +func (u *ComputeBackendBucketIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute backendbucket %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket_generated_test.go new file mode 100644 index 0000000000..fe27dccac1 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket_generated_test.go @@ -0,0 +1,240 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual 
+// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeBackendBucketIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendBucketIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_compute_backend_bucket_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/backendBuckets/%s roles/viewer", getTestProjectFromEnv(), fmt.Sprintf("tf-test-image-backend-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccComputeBackendBucketIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_compute_backend_bucket_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/backendBuckets/%s roles/viewer", getTestProjectFromEnv(), fmt.Sprintf("tf-test-image-backend-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendBucketIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccComputeBackendBucketIamMember_basicGenerated(context), + }, + { + ResourceName: 
"google_compute_backend_bucket_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/backendBuckets/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), fmt.Sprintf("tf-test-image-backend-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendBucketIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendBucketIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_compute_backend_bucket_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/backendBuckets/%s", getTestProjectFromEnv(), fmt.Sprintf("tf-test-image-backend-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendBucketIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_compute_backend_bucket_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/backendBuckets/%s", getTestProjectFromEnv(), fmt.Sprintf("tf-test-image-backend-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeBackendBucketIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true +} + +resource "google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} + +resource "google_compute_backend_bucket_iam_member" "foo" { + project = 
google_compute_backend_bucket.image_backend.project + name = google_compute_backend_bucket.image_backend.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccComputeBackendBucketIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true +} + +resource "google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_compute_backend_bucket_iam_policy" "foo" { + project = google_compute_backend_bucket.image_backend.project + name = google_compute_backend_bucket.image_backend.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccComputeBackendBucketIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true +} + +resource "google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} + +data "google_iam_policy" "foo" { +} + +resource "google_compute_backend_bucket_iam_policy" "foo" { + project = google_compute_backend_bucket.image_backend.project + name = google_compute_backend_bucket.image_backend.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccComputeBackendBucketIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource 
"google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true +} + +resource "google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} + +resource "google_compute_backend_bucket_iam_binding" "foo" { + project = google_compute_backend_bucket.image_backend.project + name = google_compute_backend_bucket.image_backend.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccComputeBackendBucketIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true +} + +resource "google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} + +resource "google_compute_backend_bucket_iam_binding" "foo" { + project = google_compute_backend_bucket.image_backend.project + name = google_compute_backend_bucket.image_backend.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go index 843825a900..064fabfb81 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go index 1589bd95ff..77b791525e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk_generated_test.go index 3b76fdbdcf..8257d89fc8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk_generated_test.go @@ -122,7 +122,7 @@ resource "google_compute_disk" "default" { name = "tf-test-test-disk%{random_suffix}" type = "pd-ssd" zone = "us-central1-a" - image = "debian-9-stretch-v20200805" + image = "debian-11-bullseye-v20220719" labels = { environment = "dev" } @@ -145,7 +145,7 @@ resource "google_compute_disk" "default" { name = "tf-test-test-disk%{random_suffix}" type = "pd-ssd" zone = "us-central1-a" - image = "debian-9-stretch-v20200805" + image = "debian-11-bullseye-v20220719" labels = { environment = "dev" } @@ -174,7 +174,7 @@ resource "google_compute_disk" "default" { name = "tf-test-test-disk%{random_suffix}" type = "pd-ssd" zone = "us-central1-a" - image = "debian-9-stretch-v20200805" + image = "debian-11-bullseye-v20220719" labels = { environment = "dev" } @@ 
-199,7 +199,7 @@ resource "google_compute_disk" "default" { name = "tf-test-test-disk%{random_suffix}" type = "pd-ssd" zone = "us-central1-a" - image = "debian-9-stretch-v20200805" + image = "debian-11-bullseye-v20220719" labels = { environment = "dev" } @@ -222,7 +222,7 @@ resource "google_compute_disk" "default" { name = "tf-test-test-disk%{random_suffix}" type = "pd-ssd" zone = "us-central1-a" - image = "debian-9-stretch-v20200805" + image = "debian-11-bullseye-v20220719" labels = { environment = "dev" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go index dd9d7de81d..f75c49ac57 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go index 5218a71e1b..4e3c46c94b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance_generated_test.go index 75c3b78e2c..d0d34cb3b8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance_generated_test.go @@ -282,7 +282,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -310,7 +310,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -344,7 +344,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -374,7 +374,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -402,7 +402,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -430,7 +430,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -463,7 +463,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -504,7 +504,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = 
"debian-cloud/debian-11" } } @@ -537,7 +537,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -578,7 +578,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go index 5275396cc6..a80307db60 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image_generated_test.go index b55a1e9bc9..4424521220 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image_generated_test.go @@ -209,7 +209,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -243,7 +243,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -284,7 +284,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = 
"debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -321,7 +321,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -355,7 +355,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -389,7 +389,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -428,7 +428,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -475,7 +475,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -514,7 +514,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -561,7 +561,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go index d015340ffd..1f457544d5 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go index 94b383fcc4..3d54d8b16e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk_generated_test.go index 0c8e8f855a..e0120e00d4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk_generated_test.go @@ -130,7 +130,7 @@ resource "google_compute_region_disk" "regiondisk" { resource "google_compute_disk" "disk" { name = "tf-test-my-disk%{random_suffix}" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -166,7 +166,7 @@ resource "google_compute_region_disk" "regiondisk" { resource "google_compute_disk" "disk" { name = "tf-test-my-disk%{random_suffix}" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -208,7 +208,7 @@ resource "google_compute_region_disk" "regiondisk" { resource "google_compute_disk" "disk" { name = "tf-test-my-disk%{random_suffix}" - image = 
"debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -246,7 +246,7 @@ resource "google_compute_region_disk" "regiondisk" { resource "google_compute_disk" "disk" { name = "tf-test-my-disk%{random_suffix}" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -282,7 +282,7 @@ resource "google_compute_region_disk" "regiondisk" { resource "google_compute_disk" "disk" { name = "tf-test-my-disk%{random_suffix}" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot.go new file mode 100644 index 0000000000..4247803521 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot.go @@ -0,0 +1,199 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var ComputeSnapshotIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type ComputeSnapshotIamUpdater struct { + project string + name string + d TerraformResourceData + Config *Config +} + +func ComputeSnapshotIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSnapshotIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ComputeSnapshotIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, 
config) + if project != "" { + values["project"] = project + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSnapshotIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeSnapshotIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifySnapshotUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeSnapshotIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifySnapshotUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", 
project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeSnapshotIamUpdater) qualifySnapshotUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeSnapshotIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name) +} + +func (u *ComputeSnapshotIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-snapshot-%s", u.GetResourceId()) +} + +func (u *ComputeSnapshotIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute snapshot %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot_generated_test.go new file mode 100644 index 0000000000..0d5047e9c9 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot_generated_test.go @@ -0,0 +1,295 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeSnapshotIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeSnapshotIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_compute_snapshot_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/snapshots/%s roles/viewer", getTestProjectFromEnv(), fmt.Sprintf("tf-test-my-snapshot%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccComputeSnapshotIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_compute_snapshot_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/snapshots/%s roles/viewer", getTestProjectFromEnv(), fmt.Sprintf("tf-test-my-snapshot%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSnapshotIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccComputeSnapshotIamMember_basicGenerated(context), + }, + { + ResourceName: "google_compute_snapshot_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/snapshots/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), fmt.Sprintf("tf-test-my-snapshot%s", 
context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSnapshotIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeSnapshotIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_compute_snapshot_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/snapshots/%s", getTestProjectFromEnv(), fmt.Sprintf("tf-test-my-snapshot%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSnapshotIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_compute_snapshot_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/global/snapshots/%s", getTestProjectFromEnv(), fmt.Sprintf("tf-test-my-snapshot%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeSnapshotIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + labels = { + my_label = "value" + } + storage_locations = ["us-central1"] +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot_iam_member" "foo" { + project = google_compute_snapshot.snapshot.project + name = google_compute_snapshot.snapshot.name + role = "%{role}" + member = 
"user:admin@hashicorptest.com" +} +`, context) +} + +func testAccComputeSnapshotIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + labels = { + my_label = "value" + } + storage_locations = ["us-central1"] +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_compute_snapshot_iam_policy" "foo" { + project = google_compute_snapshot.snapshot.project + name = google_compute_snapshot.snapshot.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccComputeSnapshotIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + labels = { + my_label = "value" + } + storage_locations = ["us-central1"] +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +data "google_iam_policy" "foo" { +} + +resource "google_compute_snapshot_iam_policy" "foo" { + project = google_compute_snapshot.snapshot.project + name = google_compute_snapshot.snapshot.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) 
+} + +func testAccComputeSnapshotIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + labels = { + my_label = "value" + } + storage_locations = ["us-central1"] +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot_iam_binding" "foo" { + project = google_compute_snapshot.snapshot.project + name = google_compute_snapshot.snapshot.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccComputeSnapshotIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + labels = { + my_label = "value" + } + storage_locations = ["us-central1"] +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot_iam_binding" "foo" { + project = google_compute_snapshot.snapshot.project + name = google_compute_snapshot.snapshot.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go index 7c18f1ee7f..18852413ad 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go index 1293c5d0b5..a90ed7a0a2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go index bafde46023..c9d3a8acc9 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go index 0385551589..5ce5a0a398 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go index 55c1b600f8..4fb9008faa 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy.go new file mode 100644 index 0000000000..f0d5abb3ff --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var DataprocAutoscalingPolicyIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "policy_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type DataprocAutoscalingPolicyIamUpdater struct { + project string + location string + policyId string + d TerraformResourceData + Config *Config +} + +func DataprocAutoscalingPolicyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := 
getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("policy_id"); ok { + values["policy_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("policy_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocAutoscalingPolicyIamUpdater{ + project: values["project"], + location: values["location"], + policyId: values["policy_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("policy_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting policy_id: %s", err) + } + + return u, nil +} + +func DataprocAutoscalingPolicyIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != 
nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocAutoscalingPolicyIamUpdater{ + project: values["project"], + location: values["location"], + policyId: values["policy_id"], + d: d, + Config: config, + } + if err := d.Set("policy_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting policy_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAutoscalingPolicyUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAutoscalingPolicyUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting 
IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) qualifyAutoscalingPolicyUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId) +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataproc-autoscalingpolicy-%s", u.GetResourceId()) +} + +func (u *DataprocAutoscalingPolicyIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataproc autoscalingpolicy %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy_generated_test.go new file mode 100644 index 0000000000..8016b08331 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy_generated_test.go @@ -0,0 +1,275 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataprocAutoscalingPolicyIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataprocAutoscalingPolicyIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-dataproc-policy%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccDataprocAutoscalingPolicyIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-dataproc-policy%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDataprocAutoscalingPolicyIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccDataprocAutoscalingPolicyIamMember_basicGenerated(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy_iam_member.foo", + 
ImportStateId: fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-dataproc-policy%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDataprocAutoscalingPolicyIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataprocAutoscalingPolicyIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-dataproc-policy%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDataprocAutoscalingPolicyIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-dataproc-policy%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDataprocAutoscalingPolicyIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_autoscaling_policy" "basic" { + policy_id = "tf-test-dataproc-policy%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} + +resource "google_dataproc_autoscaling_policy_iam_member" "foo" { + project = 
google_dataproc_autoscaling_policy.basic.project + location = google_dataproc_autoscaling_policy.basic.location + policy_id = google_dataproc_autoscaling_policy.basic.policy_id + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccDataprocAutoscalingPolicyIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_autoscaling_policy" "basic" { + policy_id = "tf-test-dataproc-policy%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_dataproc_autoscaling_policy_iam_policy" "foo" { + project = google_dataproc_autoscaling_policy.basic.project + location = google_dataproc_autoscaling_policy.basic.location + policy_id = google_dataproc_autoscaling_policy.basic.policy_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccDataprocAutoscalingPolicyIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_autoscaling_policy" "basic" { + policy_id = "tf-test-dataproc-policy%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} + +data "google_iam_policy" "foo" { +} + +resource "google_dataproc_autoscaling_policy_iam_policy" "foo" { + project = google_dataproc_autoscaling_policy.basic.project + location = google_dataproc_autoscaling_policy.basic.location + policy_id = google_dataproc_autoscaling_policy.basic.policy_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func 
testAccDataprocAutoscalingPolicyIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_autoscaling_policy" "basic" { + policy_id = "tf-test-dataproc-policy%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} + +resource "google_dataproc_autoscaling_policy_iam_binding" "foo" { + project = google_dataproc_autoscaling_policy.basic.project + location = google_dataproc_autoscaling_policy.basic.location + policy_id = google_dataproc_autoscaling_policy.basic.policy_id + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccDataprocAutoscalingPolicyIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_autoscaling_policy" "basic" { + policy_id = "tf-test-dataproc-policy%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} + +resource "google_dataproc_autoscaling_policy_iam_binding" "foo" { + project = google_dataproc_autoscaling_policy.basic.project + location = google_dataproc_autoscaling_policy.basic.location + policy_id = google_dataproc_autoscaling_policy.basic.policy_id + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation.go new file mode 100644 index 0000000000..18dcf61d07 --- /dev/null +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var DataprocMetastoreFederationIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "federation_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type DataprocMetastoreFederationIamUpdater struct { + project string + location string + federationId string + d TerraformResourceData + Config *Config +} + +func DataprocMetastoreFederationIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = 
location + if v, ok := d.GetOk("federation_id"); ok { + values["federation_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/federations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("federation_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreFederationIamUpdater{ + project: values["project"], + location: values["location"], + federationId: values["federation_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("federation_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting federation_id: %s", err) + } + + return u, nil +} + +func DataprocMetastoreFederationIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/federations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreFederationIamUpdater{ + project: values["project"], + location: values["location"], + federationId: values["federation_id"], + d: d, + Config: config, + } + if err := d.Set("federation_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting federation_id: %s", err) + } 
+ d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocMetastoreFederationIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyFederationUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocMetastoreFederationIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyFederationUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocMetastoreFederationIamUpdater) qualifyFederationUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocMetastoreBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/federations/%s", u.project, u.location, u.federationId), methodIdentifier) + 
url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocMetastoreFederationIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/federations/%s", u.project, u.location, u.federationId) +} + +func (u *DataprocMetastoreFederationIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataprocmetastore-federation-%s", u.GetResourceId()) +} + +func (u *DataprocMetastoreFederationIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataprocmetastore federation %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation_generated_test.go new file mode 100644 index 0000000000..f71f88f7f2 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation_generated_test.go @@ -0,0 +1,291 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataprocMetastoreFederationIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreFederationIamBinding_basicGenerated(context), + }, + { + // Test Iam Binding update + Config: testAccDataprocMetastoreFederationIamBinding_updateGenerated(context), + }, + }, + }) +} + +func TestAccDataprocMetastoreFederationIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccDataprocMetastoreFederationIamMember_basicGenerated(context), + }, + }, + }) +} + +func TestAccDataprocMetastoreFederationIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreFederationIamPolicy_basicGenerated(context), + }, + { + Config: testAccDataprocMetastoreFederationIamPolicy_emptyBinding(context), + }, + }, + }) +} + +func testAccDataprocMetastoreFederationIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_federation" "default" { + provider 
= google-beta + location = "us-central1" + federation_id = "tf-test-fed-1%{random_suffix}" + version = "3.1.2" + + backend_metastores { + rank = "1" + name = google_dataproc_metastore_service.default.id + metastore_type = "DATAPROC_METASTORE" + } +} + +resource "google_dataproc_metastore_service" "default" { + provider = google-beta + service_id = "tf-test-fed-1%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + + hive_metastore_config { + version = "3.1.2" + endpoint_protocol = "GRPC" + } +} + +resource "google_dataproc_metastore_federation_iam_member" "foo" { + provider = google-beta + project = google_dataproc_metastore_federation.default.project + location = google_dataproc_metastore_federation.default.location + federation_id = google_dataproc_metastore_federation.default.federation_id + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccDataprocMetastoreFederationIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_federation" "default" { + provider = google-beta + location = "us-central1" + federation_id = "tf-test-fed-1%{random_suffix}" + version = "3.1.2" + + backend_metastores { + rank = "1" + name = google_dataproc_metastore_service.default.id + metastore_type = "DATAPROC_METASTORE" + } +} + +resource "google_dataproc_metastore_service" "default" { + provider = google-beta + service_id = "tf-test-fed-1%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + + hive_metastore_config { + version = "3.1.2" + endpoint_protocol = "GRPC" + } +} + +data "google_iam_policy" "foo" { + provider = google-beta + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_dataproc_metastore_federation_iam_policy" "foo" { + provider = google-beta + project = google_dataproc_metastore_federation.default.project + location = google_dataproc_metastore_federation.default.location + 
federation_id = google_dataproc_metastore_federation.default.federation_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccDataprocMetastoreFederationIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_federation" "default" { + provider = google-beta + location = "us-central1" + federation_id = "tf-test-fed-1%{random_suffix}" + version = "3.1.2" + + backend_metastores { + rank = "1" + name = google_dataproc_metastore_service.default.id + metastore_type = "DATAPROC_METASTORE" + } +} + +resource "google_dataproc_metastore_service" "default" { + provider = google-beta + service_id = "tf-test-fed-1%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + + hive_metastore_config { + version = "3.1.2" + endpoint_protocol = "GRPC" + } +} + +data "google_iam_policy" "foo" { + provider = google-beta +} + +resource "google_dataproc_metastore_federation_iam_policy" "foo" { + provider = google-beta + project = google_dataproc_metastore_federation.default.project + location = google_dataproc_metastore_federation.default.location + federation_id = google_dataproc_metastore_federation.default.federation_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccDataprocMetastoreFederationIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_federation" "default" { + provider = google-beta + location = "us-central1" + federation_id = "tf-test-fed-1%{random_suffix}" + version = "3.1.2" + + backend_metastores { + rank = "1" + name = google_dataproc_metastore_service.default.id + metastore_type = "DATAPROC_METASTORE" + } +} + +resource "google_dataproc_metastore_service" "default" { + provider = google-beta + service_id = "tf-test-fed-1%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + + hive_metastore_config { + version = "3.1.2" + 
endpoint_protocol = "GRPC" + } +} + +resource "google_dataproc_metastore_federation_iam_binding" "foo" { + provider = google-beta + project = google_dataproc_metastore_federation.default.project + location = google_dataproc_metastore_federation.default.location + federation_id = google_dataproc_metastore_federation.default.federation_id + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccDataprocMetastoreFederationIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_federation" "default" { + provider = google-beta + location = "us-central1" + federation_id = "tf-test-fed-1%{random_suffix}" + version = "3.1.2" + + backend_metastores { + rank = "1" + name = google_dataproc_metastore_service.default.id + metastore_type = "DATAPROC_METASTORE" + } +} + +resource "google_dataproc_metastore_service" "default" { + provider = google-beta + service_id = "tf-test-fed-1%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + + hive_metastore_config { + version = "3.1.2" + endpoint_protocol = "GRPC" + } +} + +resource "google_dataproc_metastore_federation_iam_binding" "foo" { + provider = google-beta + project = google_dataproc_metastore_federation.default.project + location = google_dataproc_metastore_federation.default.location + federation_id = google_dataproc_metastore_federation.default.federation_id + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service.go new file mode 100644 index 0000000000..bcc3bc91b2 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service.go @@ -0,0 +1,223 @@ +// 
---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var DataprocMetastoreServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "service_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type DataprocMetastoreServiceIamUpdater struct { + project string + location string + serviceId string + d TerraformResourceData + Config *Config +} + +func DataprocMetastoreServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("service_id"); ok { + values["service_id"] = v.(string) + } + + // We may have gotten either a long or short name, so 
attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreServiceIamUpdater{ + project: values["project"], + location: values["location"], + serviceId: values["service_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("service_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting service_id: %s", err) + } + + return u, nil +} + +func DataprocMetastoreServiceIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreServiceIamUpdater{ + project: values["project"], + location: values["location"], + serviceId: values["service_id"], + d: d, + Config: config, + } + if err := d.Set("service_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting service_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocMetastoreServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := 
u.qualifyServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocMetastoreServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocMetastoreServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocMetastoreBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocMetastoreServiceIamUpdater) GetResourceId() string { + return 
fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId) +} + +func (u *DataprocMetastoreServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataprocmetastore-service-%s", u.GetResourceId()) +} + +func (u *DataprocMetastoreServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataprocmetastore service %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service_generated_test.go new file mode 100644 index 0000000000..ffec7b136f --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service_generated_test.go @@ -0,0 +1,265 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataprocMetastoreServiceIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreServiceIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_dataproc_metastore_service_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/services/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-metastore-srv%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccDataprocMetastoreServiceIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_dataproc_metastore_service_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/services/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-metastore-srv%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDataprocMetastoreServiceIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccDataprocMetastoreServiceIamMember_basicGenerated(context), + }, + { + ResourceName: "google_dataproc_metastore_service_iam_member.foo", + ImportStateId: 
fmt.Sprintf("projects/%s/locations/%s/services/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-metastore-srv%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDataprocMetastoreServiceIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreServiceIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_dataproc_metastore_service_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/services/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-metastore-srv%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDataprocMetastoreServiceIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_dataproc_metastore_service_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/services/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("tf-test-metastore-srv%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDataprocMetastoreServiceIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "default" { + service_id = "tf-test-metastore-srv%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "2.3.6" + } +} + +resource "google_dataproc_metastore_service_iam_member" "foo" { + project = google_dataproc_metastore_service.default.project + location = 
google_dataproc_metastore_service.default.location + service_id = google_dataproc_metastore_service.default.service_id + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccDataprocMetastoreServiceIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "default" { + service_id = "tf-test-metastore-srv%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "2.3.6" + } +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_dataproc_metastore_service_iam_policy" "foo" { + project = google_dataproc_metastore_service.default.project + location = google_dataproc_metastore_service.default.location + service_id = google_dataproc_metastore_service.default.service_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccDataprocMetastoreServiceIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "default" { + service_id = "tf-test-metastore-srv%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "2.3.6" + } +} + +data "google_iam_policy" "foo" { +} + +resource "google_dataproc_metastore_service_iam_policy" "foo" { + project = google_dataproc_metastore_service.default.project + location = google_dataproc_metastore_service.default.location + service_id = google_dataproc_metastore_service.default.service_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccDataprocMetastoreServiceIamBinding_basicGenerated(context map[string]interface{}) string { + return 
Nprintf(` +resource "google_dataproc_metastore_service" "default" { + service_id = "tf-test-metastore-srv%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "2.3.6" + } +} + +resource "google_dataproc_metastore_service_iam_binding" "foo" { + project = google_dataproc_metastore_service.default.project + location = google_dataproc_metastore_service.default.location + service_id = google_dataproc_metastore_service.default.service_id + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccDataprocMetastoreServiceIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "default" { + service_id = "tf-test-metastore-srv%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "2.3.6" + } +} + +resource "google_dataproc_metastore_service_iam_binding" "foo" { + project = google_dataproc_metastore_service.default.project + location = google_dataproc_metastore_service.default.location + service_id = google_dataproc_metastore_service.default.service_id + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go index bf8d859264..a26da84ce3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go index e95202a874..3ac5cfc429 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership.go new file mode 100644 index 0000000000..d916f4590c --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership.go @@ -0,0 +1,199 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var GKEHubMembershipIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "membership_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type GKEHubMembershipIamUpdater struct { + project string + membershipId string + d TerraformResourceData + Config *Config +} + +func GKEHubMembershipIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("membership_id"); ok { + values["membership_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("membership_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHubMembershipIamUpdater{ + project: values["project"], + membershipId: values["membership_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("membership_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting membership_id: %s", err) + } + + return u, nil +} + +func 
GKEHubMembershipIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHubMembershipIamUpdater{ + project: values["project"], + membershipId: values["membership_id"], + d: d, + Config: config, + } + if err := d.Set("membership_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting membership_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *GKEHubMembershipIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyMembershipUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *GKEHubMembershipIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyMembershipUrl("setIamPolicy") + if err != nil { + return err + } + project, 
err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *GKEHubMembershipIamUpdater) qualifyMembershipUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{GKEHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *GKEHubMembershipIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId) +} + +func (u *GKEHubMembershipIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-gkehub-membership-%s", u.GetResourceId()) +} + +func (u *GKEHubMembershipIamUpdater) DescribeResource() string { + return fmt.Sprintf("gkehub membership %q", u.GetResourceId()) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership_generated_test.go new file mode 100644 index 0000000000..dac247b7f3 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership_generated_test.go @@ -0,0 +1,255 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// 
changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccGKEHubMembershipIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccGKEHubMembershipIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_gke_hub_membership_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/memberships/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("basic%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccGKEHubMembershipIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_gke_hub_membership_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/memberships/%s roles/viewer", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("basic%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGKEHubMembershipIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccGKEHubMembershipIamMember_basicGenerated(context), + }, + { + ResourceName: 
"google_gke_hub_membership_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/memberships/%s roles/viewer user:admin@hashicorptest.com", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("basic%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGKEHubMembershipIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + "role": "roles/viewer", + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccGKEHubMembershipIamPolicy_basicGenerated(context), + }, + { + ResourceName: "google_gke_hub_membership_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/memberships/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("basic%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubMembershipIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_gke_hub_membership_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/memberships/%s", getTestProjectFromEnv(), getTestRegionFromEnv(), fmt.Sprintf("basic%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubMembershipIamMember_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "basic%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +} + +resource "google_gke_hub_membership_iam_member" "foo" { + project = google_gke_hub_membership.membership.project + 
membership_id = google_gke_hub_membership.membership.membership_id + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccGKEHubMembershipIamPolicy_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "basic%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_gke_hub_membership_iam_policy" "foo" { + project = google_gke_hub_membership.membership.project + membership_id = google_gke_hub_membership.membership.membership_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccGKEHubMembershipIamPolicy_emptyBinding(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "basic%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +} + +data "google_iam_policy" "foo" { +} + +resource "google_gke_hub_membership_iam_policy" "foo" { + project = google_gke_hub_membership.membership.project + membership_id = google_gke_hub_membership.membership.membership_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccGKEHubMembershipIamBinding_basicGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + 
location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "basic%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +} + +resource "google_gke_hub_membership_iam_binding" "foo" { + project = google_gke_hub_membership.membership.project + membership_id = google_gke_hub_membership.membership.membership_id + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccGKEHubMembershipIamBinding_updateGenerated(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "basic%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +} + +resource "google_gke_hub_membership_iam_binding" "foo" { + project = google_gke_hub_membership.membership.project + membership_id = google_gke_hub_membership.membership.membership_id + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go index 6a0c83c5c9..8c5dfcfc5c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go index 3ad94c8bdf..c4340d29c5 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go index 9168020cfe..65f567009e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go index 618ce5c72a..0ce966c0fc 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go index dabc9c4d4b..b9bf4ba1ff 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance_generated_test.go index 12afd39941..aaee7f3159 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance_generated_test.go @@ -282,7 +282,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -310,7 +310,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -344,7 +344,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -374,7 +374,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -402,7 +402,7 @@ resource 
"google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -430,7 +430,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -463,7 +463,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -504,7 +504,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -537,7 +537,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -578,7 +578,7 @@ resource "google_compute_instance" "tunnelvm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go index 994c2f4816..84abcba2d0 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go index 97900452a7..72886bfe28 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go index 723bc87a01..f9540ecb33 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go index 2ebd9690d1..fadfe7226a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go index 7a7e42f587..b92d80e3c8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go index b5e1564939..0306ea8cfd 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go index 86e9afecc5..4e6ac1aca7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go index acefe6605b..b8f0ffe31d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go index a4b9d4a76e..5d87c9b0c0 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go index 13c7e8ff79..6839ffc32b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go index b4a453b12e..34feb85f8d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go index bc9932e7e8..df172f7af0 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go index 4db2c80f25..afbd6a0fa7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go index ad353d07d7..5740368446 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go index d53f8568bf..ea23f944ea 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go index f424b2bcd8..1091b1afef 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go index 59c34056ec..01de025826 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go index 11ec84a8e3..21b17dc896 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go @@ -26,7 +26,7 @@ var ( resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/[a-z0-9]+/projects/(%s)/global/images/(%s)", ProjectRegex, resolveImageImageRegex)) windowsSqlImage = regexp.MustCompile("^sql-(?:server-)?([0-9]{4})-([a-z]+)-windows-(?:server-)?([0-9]{4})(?:-r([0-9]+))?-dc-v[0-9]+$") - canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)-") + canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)(?:.*(arm64))?.*$") cosLtsImage = regexp.MustCompile("^cos-([0-9]+)-") ) @@ -78,13 +78,18 @@ func sanityTestRegexMatches(expected int, got []string, regexType, name string) // If it's in the form global/images/{image}, return it // If it's in the form global/images/family/{family}, return it // If it's in the form family/{family}, check if it's a family in the current project. 
If it is, return it as global/images/family/{family}. -// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. +// +// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. +// // If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}. -// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. +// +// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. +// // If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}. -// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. -// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. -// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family} +// +// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. +// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. +// If not, check if it could be a GCP-provided family, and if it exists. 
If it does, return it as projects/{project}/global/images/family/{family} func resolveImage(c *Config, project, name, userAgent string) (string, error) { var builtInProject string for k, v := range imageMap { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go index 056bd6aa87..a0ebc7fb7f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go index d4ffc0f92c..923108f9aa 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go index f31a6d4fe7..4f2644a15f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go index 5e09e88fac..b7c09a4ce1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go index 41b23d5ab3..643f15d319 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go @@ -182,6 +182,7 @@ func schemaNodeConfig() *schema.Schema { "min_cpu_platform": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.`, }, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go index fc4c8668e5..18213a3474 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_ca_utils.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_ca_utils.go new file mode 100644 index 0000000000..6cc7b6d72b --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_ca_utils.go @@ -0,0 +1,221 @@ +package google + +import ( + "fmt" + "log" + "math/rand" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// CA related utilities. + +func enableCA(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + enableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Enabling CertificateAuthority") + + res, err := sendRequest(config, "POST", billingProject, enableUrl, userAgent, nil) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err) + } + return nil +} + +func disableCA(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Disabling CA") + + dRes, err := sendRequest(config, "POST", 
billingProject, disableUrl, userAgent, nil) + if err != nil { + return fmt.Errorf("Error disabling CA: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, dRes, &opRes, project, "Disabling CA", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("Error waiting to disable CA: %s", err) + } + return nil +} + +func activateSubCAWithThirdPartyIssuer(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + // 1. prepare parameters + signedCACert := d.Get("pem_ca_certificate").(string) + + sc, ok := d.GetOk("subordinate_config") + if !ok { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + c := sc.([]interface{}) + if len(c) == 0 || c[0] == nil { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + chain, ok := c[0].(map[string]interface{})["pem_issuer_chain"] + if !ok { + return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") + } + issuerChain := chain.([]interface{}) + if len(issuerChain) == 0 || issuerChain[0] == nil { + return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") + } + pc := issuerChain[0].(map[string]interface{})["pem_certificates"].([]interface{}) + pemIssuerChain := make([]string, 0, len(pc)) + for _, pem := range pc { + pemIssuerChain = append(pemIssuerChain, pem.(string)) + } + + // 2. 
activate CA + activateObj := make(map[string]interface{}) + activateObj["pemCaCertificate"] = signedCACert + activateObj["subordinateConfig"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"].(map[string]interface{})["pemCertificates"] = pemIssuerChain + + activateUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") + if err != nil { + return err + } + + log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) + res, err := sendRequest(config, "POST", billingProject, activateUrl, userAgent, activateObj) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Activating CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to activate CertificateAuthority: %s", err) + } + return nil +} + +func activateSubCAWithFirstPartyIssuer(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + // 1. get issuer + sc, ok := d.GetOk("subordinate_config") + if !ok { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + c := sc.([]interface{}) + if len(c) == 0 || c[0] == nil { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + ca, ok := c[0].(map[string]interface{})["certificate_authority"] + if !ok { + return fmt.Errorf("subordinate_config.certificate_authority is required to activate subordinate CA with first party issuer") + } + issuer := ca.(string) + + // 2.
fetch CSR + fetchCSRUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:fetch") + if err != nil { + return err + } + res, err := sendRequest(config, "GET", billingProject, fetchCSRUrl, userAgent, nil) + if err != nil { + return fmt.Errorf("failed to fetch CSR: %v", err) + } + csr := res["pemCsr"] + + // 3. sign the CSR with first party issuer + genCertId := func() string { + currentTime := time.Now() + dateStr := currentTime.Format("20060102") + + rand.Seed(time.Now().UnixNano()) + const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + rand1 := make([]byte, 3) + for i := range rand1 { + rand1[i] = letters[rand.Intn(len(letters))] + } + rand2 := make([]byte, 3) + for i := range rand2 { + rand2[i] = letters[rand.Intn(len(letters))] + } + return fmt.Sprintf("subordinate-%v-%v-%v", dateStr, string(rand1), string(rand2)) + } + + // parseCAName parses a CA name and return the CaPool name and CaId. 
+ parseCAName := func(n string) (string, string, error) { + parts := regexp.MustCompile(`(projects/[a-z0-9-]+/locations/[a-z0-9-]+/caPools/[a-zA-Z0-9-]+)/certificateAuthorities/([a-zA-Z0-9-]+)`).FindStringSubmatch(n) + if len(parts) != 3 { + return "", "", fmt.Errorf("failed to parse CA name: %v, parts: %v", n, parts) + } + return parts[1], parts[2], err + } + + obj := make(map[string]interface{}) + obj["pemCsr"] = csr + obj["lifetime"] = d.Get("lifetime") + + certId := genCertId() + poolName, issuerId, err := parseCAName(issuer) + if err != nil { + return err + } + + PrivatecaBasePath, err := replaceVars(d, config, "{{PrivatecaBasePath}}") + if err != nil { + return err + } + signUrl := fmt.Sprintf("%v%v/certificates?certificateId=%v", PrivatecaBasePath, poolName, certId) + signUrl, err = addQueryParams(signUrl, map[string]string{"issuingCertificateAuthorityId": issuerId}) + if err != nil { + return err + } + + log.Printf("[DEBUG] Signing CA Certificate: %#v", obj) + res, err = sendRequestWithTimeout(config, "POST", billingProject, signUrl, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Certificate: %s", err) + } + signedCACert := res["pemCertificate"] + + // 4. activate sub CA with the signed CA cert. 
+ activateObj := make(map[string]interface{}) + activateObj["pemCaCertificate"] = signedCACert + activateObj["subordinateConfig"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["certificateAuthority"] = issuer + + activateUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") + if err != nil { + return err + } + + log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) + res, err = sendRequest(config, "POST", billingProject, activateUrl, userAgent, activateObj) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to activate CertificateAuthority: %s", err) + } + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go index 9b950536a5..b3a8a13259 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md.
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go index 175cc4cf93..052801989c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go @@ -797,22 +797,8 @@ func Provider() *schema.Provider { BigtableAdminCustomEndpointEntryKey: BigtableAdminCustomEndpointEntry, // dcl - AssuredWorkloadsEndpointEntryKey: AssuredWorkloadsEndpointEntry, - ClouddeployEndpointEntryKey: ClouddeployEndpointEntry, - CloudResourceManagerEndpointEntryKey: CloudResourceManagerEndpointEntry, - DataplexEndpointEntryKey: DataplexEndpointEntry, - EventarcEndpointEntryKey: EventarcEndpointEntry, - FirebaserulesEndpointEntryKey: FirebaserulesEndpointEntry, - GkeHubFeatureCustomEndpointEntryKey: GkeHubFeatureCustomEndpointEntry, - NetworkConnectivityEndpointEntryKey: NetworkConnectivityEndpointEntry, - OrgPolicyEndpointEntryKey: OrgPolicyEndpointEntry, - PrivatecaCertificateTemplateEndpointEntryKey: PrivatecaCertificateTemplateCustomEndpointEntry, - RecaptchaEnterpriseEndpointEntryKey: RecaptchaEnterpriseEndpointEntry, - ContainerAwsCustomEndpointEntryKey: ContainerAwsCustomEndpointEntry, - ContainerAzureCustomEndpointEntryKey: ContainerAzureCustomEndpointEntry, - ApikeysEndpointEntryKey: ApikeysEndpointEntry, - - CloudBuildWorkerPoolEndpointEntryKey: CloudBuildWorkerPoolEndpointEntry, + ContainerAwsCustomEndpointEntryKey: ContainerAwsCustomEndpointEntry, + ContainerAzureCustomEndpointEntryKey: ContainerAzureCustomEndpointEntry, }, ProviderMetaSchema: map[string]*schema.Schema{ @@ -918,6 +904,7 @@ func Provider() *schema.Provider { "google_service_account": dataSourceGoogleServiceAccount(), 
"google_service_account_access_token": dataSourceGoogleServiceAccountAccessToken(), "google_service_account_id_token": dataSourceGoogleServiceAccountIdToken(), + "google_service_account_jwt": dataSourceGoogleServiceAccountJwt(), "google_service_account_key": dataSourceGoogleServiceAccountKey(), "google_sourcerepo_repository": dataSourceGoogleSourceRepoRepository(), "google_spanner_instance": dataSourceSpannerInstance(), @@ -944,12 +931,14 @@ func Provider() *schema.Provider { return providerConfigure(ctx, d, provider) } + configureDCLProvider(provider) + return provider } -// Generated resources: 252 -// Generated IAM resources: 141 -// Total generated resources: 393 +// Generated resources: 256 +// Generated IAM resources: 168 +// Total generated resources: 424 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -972,6 +961,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_access_context_manager_service_perimeters": resourceAccessContextManagerServicePerimeters(), "google_access_context_manager_service_perimeter_resource": resourceAccessContextManagerServicePerimeterResource(), "google_access_context_manager_gcp_user_access_binding": resourceAccessContextManagerGcpUserAccessBinding(), + "google_active_directory_peering": resourceActiveDirectoryPeering(), "google_active_directory_domain": resourceActiveDirectoryDomain(), "google_active_directory_domain_trust": resourceActiveDirectoryDomainTrust(), "google_api_gateway_api": resourceApiGatewayApi(), @@ -1015,6 +1005,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_bigquery_table_iam_policy": ResourceIamPolicy(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), "google_bigquery_routine": resourceBigQueryRoutine(), "google_bigquery_connection": resourceBigqueryConnectionConnection(), + "google_bigquery_connection_iam_binding": 
ResourceIamBinding(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_connection_iam_member": ResourceIamMember(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_connection_iam_policy": ResourceIamPolicy(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), "google_bigquery_data_transfer_config": resourceBigqueryDataTransferConfig(), "google_bigquery_reservation": resourceBigqueryReservationReservation(), "google_bigtable_app_profile": resourceBigtableAppProfile(), @@ -1026,6 +1019,8 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_binary_authorization_policy": resourceBinaryAuthorizationPolicy(), "google_certificate_manager_dns_authorization": resourceCertificateManagerDnsAuthorization(), "google_certificate_manager_certificate": resourceCertificateManagerCertificate(), + "google_certificate_manager_certificate_map": resourceCertificateManagerCertificateMap(), + "google_certificate_manager_certificate_map_entry": resourceCertificateManagerCertificateMapEntry(), "google_cloud_asset_project_feed": resourceCloudAssetProjectFeed(), "google_cloud_asset_folder_feed": resourceCloudAssetFolderFeed(), "google_cloud_asset_organization_feed": resourceCloudAssetOrganizationFeed(), @@ -1040,6 +1035,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_cloud_identity_group": resourceCloudIdentityGroup(), "google_cloud_identity_group_membership": resourceCloudIdentityGroupMembership(), "google_cloudiot_registry": resourceCloudIotDeviceRegistry(), + "google_cloudiot_registry_iam_binding": ResourceIamBinding(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), + "google_cloudiot_registry_iam_member": 
ResourceIamMember(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), + "google_cloudiot_registry_iam_policy": ResourceIamPolicy(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), "google_cloudiot_device": resourceCloudIotDevice(), "google_cloud_run_domain_mapping": resourceCloudRunDomainMapping(), "google_cloud_run_service": resourceCloudRunService(), @@ -1048,9 +1046,15 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_cloud_run_service_iam_policy": ResourceIamPolicy(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), "google_cloud_scheduler_job": resourceCloudSchedulerJob(), "google_cloud_tasks_queue": resourceCloudTasksQueue(), + "google_cloud_tasks_queue_iam_binding": ResourceIamBinding(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), + "google_cloud_tasks_queue_iam_member": ResourceIamMember(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), + "google_cloud_tasks_queue_iam_policy": ResourceIamPolicy(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), "google_compute_address": resourceComputeAddress(), "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_backend_bucket": resourceComputeBackendBucket(), + "google_compute_backend_bucket_iam_binding": ResourceIamBinding(ComputeBackendBucketIamSchema, ComputeBackendBucketIamUpdaterProducer, ComputeBackendBucketIdParseFunc), + "google_compute_backend_bucket_iam_member": ResourceIamMember(ComputeBackendBucketIamSchema, ComputeBackendBucketIamUpdaterProducer, ComputeBackendBucketIdParseFunc), + "google_compute_backend_bucket_iam_policy": ResourceIamPolicy(ComputeBackendBucketIamSchema, ComputeBackendBucketIamUpdaterProducer, ComputeBackendBucketIdParseFunc), 
"google_compute_backend_bucket_signed_url_key": resourceComputeBackendBucketSignedUrlKey(), "google_compute_backend_service": resourceComputeBackendService(), "google_compute_backend_service_iam_binding": ResourceIamBinding(ComputeBackendServiceIamSchema, ComputeBackendServiceIamUpdaterProducer, ComputeBackendServiceIdParseFunc), @@ -1115,6 +1119,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_compute_router_nat": resourceComputeRouterNat(), "google_compute_router_peer": resourceComputeRouterBgpPeer(), "google_compute_snapshot": resourceComputeSnapshot(), + "google_compute_snapshot_iam_binding": ResourceIamBinding(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), + "google_compute_snapshot_iam_member": ResourceIamMember(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), + "google_compute_snapshot_iam_policy": ResourceIamPolicy(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_managed_ssl_certificate": resourceComputeManagedSslCertificate(), "google_compute_region_ssl_certificate": resourceComputeRegionSslCertificate(), @@ -1165,7 +1172,17 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_data_loss_prevention_stored_info_type": resourceDataLossPreventionStoredInfoType(), "google_data_loss_prevention_deidentify_template": resourceDataLossPreventionDeidentifyTemplate(), "google_dataproc_autoscaling_policy": resourceDataprocAutoscalingPolicy(), + "google_dataproc_autoscaling_policy_iam_binding": ResourceIamBinding(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_autoscaling_policy_iam_member": ResourceIamMember(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, 
DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_autoscaling_policy_iam_policy": ResourceIamPolicy(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), "google_dataproc_metastore_service": resourceDataprocMetastoreService(), + "google_dataproc_metastore_service_iam_binding": ResourceIamBinding(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_service_iam_member": ResourceIamMember(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_service_iam_policy": ResourceIamPolicy(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_federation": resourceDataprocMetastoreFederation(), + "google_dataproc_metastore_federation_iam_binding": ResourceIamBinding(DataprocMetastoreFederationIamSchema, DataprocMetastoreFederationIamUpdaterProducer, DataprocMetastoreFederationIdParseFunc), + "google_dataproc_metastore_federation_iam_member": ResourceIamMember(DataprocMetastoreFederationIamSchema, DataprocMetastoreFederationIamUpdaterProducer, DataprocMetastoreFederationIdParseFunc), + "google_dataproc_metastore_federation_iam_policy": ResourceIamPolicy(DataprocMetastoreFederationIamSchema, DataprocMetastoreFederationIamUpdaterProducer, DataprocMetastoreFederationIdParseFunc), "google_datastore_index": resourceDatastoreIndex(), "google_deployment_manager_deployment": resourceDeploymentManagerDeployment(), "google_dialogflow_agent": resourceDialogflowAgent(), @@ -1198,6 +1215,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_game_services_game_server_config": resourceGameServicesGameServerConfig(), "google_game_services_game_server_deployment_rollout": resourceGameServicesGameServerDeploymentRollout(), 
"google_gke_hub_membership": resourceGKEHubMembership(), + "google_gke_hub_membership_iam_binding": ResourceIamBinding(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), + "google_gke_hub_membership_iam_member": ResourceIamMember(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), + "google_gke_hub_membership_iam_policy": ResourceIamPolicy(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), "google_healthcare_dataset": resourceHealthcareDataset(), "google_healthcare_dicom_store": resourceHealthcareDicomStore(), "google_healthcare_fhir_store": resourceHealthcareFhirStore(), @@ -1357,7 +1377,6 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { // ####### START handwritten resources ########### "google_app_engine_application": resourceAppEngineApplication(), "google_bigquery_table": resourceBigQueryTable(), - "google_bigquery_reservation_assignment": resourceBigqueryReservationAssignment(), "google_bigtable_gc_policy": resourceBigtableGCPolicy(), "google_bigtable_instance": resourceBigtableInstance(), "google_bigtable_table": resourceBigtableTable(), @@ -1434,38 +1453,6 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_storage_transfer_job": resourceStorageTransferJob(), // ####### END handwritten resources ########### }, - map[string]*schema.Resource{ - // ####### START tpgtools resources ########### - "google_apikeys_key": resourceApikeysKey(), - "google_assured_workloads_workload": resourceAssuredWorkloadsWorkload(), - "google_cloudbuild_worker_pool": resourceCloudbuildWorkerPool(), - "google_clouddeploy_delivery_pipeline": resourceClouddeployDeliveryPipeline(), - "google_clouddeploy_target": resourceClouddeployTarget(), - "google_compute_firewall_policy_association": resourceComputeFirewallPolicyAssociation(), - "google_compute_firewall_policy": resourceComputeFirewallPolicy(), - 
"google_compute_firewall_policy_rule": resourceComputeFirewallPolicyRule(), - "google_container_aws_cluster": resourceContainerAwsCluster(), - "google_container_aws_node_pool": resourceContainerAwsNodePool(), - "google_container_azure_client": resourceContainerAzureClient(), - "google_container_azure_cluster": resourceContainerAzureCluster(), - "google_container_azure_node_pool": resourceContainerAzureNodePool(), - "google_dataplex_lake": resourceDataplexLake(), - "google_dataproc_workflow_template": resourceDataprocWorkflowTemplate(), - "google_eventarc_trigger": resourceEventarcTrigger(), - "google_firebaserules_release": resourceFirebaserulesRelease(), - "google_firebaserules_ruleset": resourceFirebaserulesRuleset(), - "google_gke_hub_feature": resourceGkeHubFeature(), - "google_gke_hub_feature_membership": resourceGkeHubFeatureMembership(), - "google_logging_log_view": resourceLoggingLogView(), - "google_monitoring_monitored_project": resourceMonitoringMonitoredProject(), - "google_network_connectivity_hub": resourceNetworkConnectivityHub(), - "google_network_connectivity_spoke": resourceNetworkConnectivitySpoke(), - "google_org_policy_policy": resourceOrgPolicyPolicy(), - "google_os_config_os_policy_assignment": resourceOsConfigOsPolicyAssignment(), - "google_privateca_certificate_template": resourcePrivatecaCertificateTemplate(), - "google_recaptcha_enterprise_key": resourceRecaptchaEnterpriseKey(), - // ####### END tpgtools resources ########### - }, map[string]*schema.Resource{ // ####### START non-generated IAM resources ########### "google_bigtable_instance_iam_binding": ResourceIamBinding(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), @@ -1530,6 +1517,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_service_account_iam_policy": ResourceIamPolicy(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), // ####### END non-generated IAM resources ########### }, + 
dclResources, ) } @@ -1709,20 +1697,8 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.BigtableAdminBasePath = d.Get(BigtableAdminCustomEndpointEntryKey).(string) // dcl - config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) - config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) - config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) - config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) - config.DataplexBasePath = d.Get(DataplexEndpointEntryKey).(string) - config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) - config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) - config.GkeHubBasePath = d.Get(GkeHubFeatureCustomEndpointEntryKey).(string) - config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) - config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) - config.PrivatecaBasePath = d.Get(PrivatecaCertificateTemplateEndpointEntryKey).(string) config.ContainerAwsBasePath = d.Get(ContainerAwsCustomEndpointEntryKey).(string) config.ContainerAzureBasePath = d.Get(ContainerAzureCustomEndpointEntryKey).(string) - config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) stopCtx, ok := schema.StopContext(ctx) if !ok { @@ -1732,7 +1708,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr return nil, diag.FromErr(err) } - return &config, nil + return providerDCLConfigure(d, &config), nil } func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go index cfdd062e87..831544ef56 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go @@ -346,7 +346,7 @@ func NewDCLGkeHubClient(config *Config, userAgent, billingProject string, timeou dcl.WithHTTPClient(config.client), dcl.WithUserAgent(userAgent), dcl.WithLogger(dclLogger{}), - dcl.WithBasePath(config.GkeHubBasePath), + dcl.WithBasePath(config.GKEHubFeatureBasePath), } if timeout != 0 { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go index 04caae2dd6..032b13be16 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go @@ -67,33 +67,6 @@ var CloudResourceManagerEndpointEntry = &schema.Schema{ }, ""), } -var ComputeEndpointEntryKey = "compute_custom_endpoint" -var ComputeEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", - }, ""), -} - -var ContainerAwsEndpointEntryKey = "container_aws_custom_endpoint" -var ContainerAwsEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_AWS_CUSTOM_ENDPOINT", - }, ""), -} - -var ContainerAzureEndpointEntryKey = "container_azure_custom_endpoint" -var ContainerAzureEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_AZURE_CUSTOM_ENDPOINT", - }, ""), -} - var DataplexEndpointEntryKey = "dataplex_custom_endpoint" var DataplexEndpointEntry = &schema.Schema{ Type: 
schema.TypeString, @@ -121,21 +94,12 @@ var FirebaserulesEndpointEntry = &schema.Schema{ }, ""), } -var LoggingEndpointEntryKey = "logging_custom_endpoint" -var LoggingEndpointEntry = &schema.Schema{ +var GKEHubFeatureEndpointEntryKey = "gkehub_feature_custom_endpoint" +var GKEHubFeatureEndpointEntry = &schema.Schema{ Type: schema.TypeString, Optional: true, DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_LOGGING_CUSTOM_ENDPOINT", - }, ""), -} - -var MonitoringEndpointEntryKey = "monitoring_custom_endpoint" -var MonitoringEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_MONITORING_CUSTOM_ENDPOINT", + "GOOGLE_GKEHUB_FEATURE_CUSTOM_ENDPOINT", }, ""), } @@ -157,24 +121,6 @@ var OrgPolicyEndpointEntry = &schema.Schema{ }, ""), } -var OSConfigEndpointEntryKey = "os_config_custom_endpoint" -var OSConfigEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", - }, ""), -} - -var PrivatecaEndpointEntryKey = "privateca_custom_endpoint" -var PrivatecaEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, ""), -} - var RecaptchaEnterpriseEndpointEntryKey = "recaptcha_enterprise_custom_endpoint" var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ Type: schema.TypeString, @@ -184,62 +130,49 @@ var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ }, ""), } -//Add new values to config.go.erb config object declaration -//ApikeysBasePath string -//AssuredWorkloadsBasePath string -//CloudBuildWorkerPoolBasePath string -//ClouddeployBasePath string -//CloudResourceManagerBasePath string -//ComputeBasePath string -//ContainerAwsBasePath string -//ContainerAzureBasePath string -//DataplexBasePath string -//EventarcBasePath string -//FirebaserulesBasePath 
string -//LoggingBasePath string -//MonitoringBasePath string -//NetworkConnectivityBasePath string -//OrgPolicyBasePath string -//OSConfigBasePath string -//PrivatecaBasePath string -//RecaptchaEnterpriseBasePath string - -//Add new values to provider.go.erb schema initialization -// ApikeysEndpointEntryKey: ApikeysEndpointEntry, -// AssuredWorkloadsEndpointEntryKey: AssuredWorkloadsEndpointEntry, -// CloudBuildWorkerPoolEndpointEntryKey: CloudBuildWorkerPoolEndpointEntry, -// ClouddeployEndpointEntryKey: ClouddeployEndpointEntry, -// CloudResourceManagerEndpointEntryKey: CloudResourceManagerEndpointEntry, -// ComputeEndpointEntryKey: ComputeEndpointEntry, -// ContainerAwsEndpointEntryKey: ContainerAwsEndpointEntry, -// ContainerAzureEndpointEntryKey: ContainerAzureEndpointEntry, -// DataplexEndpointEntryKey: DataplexEndpointEntry, -// EventarcEndpointEntryKey: EventarcEndpointEntry, -// FirebaserulesEndpointEntryKey: FirebaserulesEndpointEntry, -// LoggingEndpointEntryKey: LoggingEndpointEntry, -// MonitoringEndpointEntryKey: MonitoringEndpointEntry, -// NetworkConnectivityEndpointEntryKey: NetworkConnectivityEndpointEntry, -// OrgPolicyEndpointEntryKey: OrgPolicyEndpointEntry, -// OSConfigEndpointEntryKey: OSConfigEndpointEntry, -// PrivatecaEndpointEntryKey: PrivatecaEndpointEntry, -// RecaptchaEnterpriseEndpointEntryKey: RecaptchaEnterpriseEndpointEntry, - -//Add new values to provider.go.erb - provider block read -// config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) -// config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) -// config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) -// config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) -// config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) -// config.ComputeBasePath = d.Get(ComputeEndpointEntryKey).(string) -// config.ContainerAwsBasePath = 
d.Get(ContainerAwsEndpointEntryKey).(string) -// config.ContainerAzureBasePath = d.Get(ContainerAzureEndpointEntryKey).(string) -// config.DataplexBasePath = d.Get(DataplexEndpointEntryKey).(string) -// config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) -// config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) -// config.LoggingBasePath = d.Get(LoggingEndpointEntryKey).(string) -// config.MonitoringBasePath = d.Get(MonitoringEndpointEntryKey).(string) -// config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) -// config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) -// config.OSConfigBasePath = d.Get(OSConfigEndpointEntryKey).(string) -// config.PrivatecaBasePath = d.Get(PrivatecaEndpointEntryKey).(string) -// config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) +type DCLConfig struct { + ApikeysBasePath string + AssuredWorkloadsBasePath string + CloudBuildWorkerPoolBasePath string + ClouddeployBasePath string + CloudResourceManagerBasePath string + DataplexBasePath string + EventarcBasePath string + FirebaserulesBasePath string + GKEHubFeatureBasePath string + NetworkConnectivityBasePath string + OrgPolicyBasePath string + RecaptchaEnterpriseBasePath string +} + +func configureDCLProvider(provider *schema.Provider) { + provider.Schema[ApikeysEndpointEntryKey] = ApikeysEndpointEntry + provider.Schema[AssuredWorkloadsEndpointEntryKey] = AssuredWorkloadsEndpointEntry + provider.Schema[CloudBuildWorkerPoolEndpointEntryKey] = CloudBuildWorkerPoolEndpointEntry + provider.Schema[ClouddeployEndpointEntryKey] = ClouddeployEndpointEntry + provider.Schema[CloudResourceManagerEndpointEntryKey] = CloudResourceManagerEndpointEntry + provider.Schema[DataplexEndpointEntryKey] = DataplexEndpointEntry + provider.Schema[EventarcEndpointEntryKey] = EventarcEndpointEntry + provider.Schema[FirebaserulesEndpointEntryKey] = FirebaserulesEndpointEntry + 
provider.Schema[GKEHubFeatureEndpointEntryKey] = GKEHubFeatureEndpointEntry + provider.Schema[NetworkConnectivityEndpointEntryKey] = NetworkConnectivityEndpointEntry + provider.Schema[OrgPolicyEndpointEntryKey] = OrgPolicyEndpointEntry + provider.Schema[RecaptchaEnterpriseEndpointEntryKey] = RecaptchaEnterpriseEndpointEntry +} + +func providerDCLConfigure(d *schema.ResourceData, config *Config) interface{} { + config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) + config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) + config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) + config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) + config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) + config.DataplexBasePath = d.Get(DataplexEndpointEntryKey).(string) + config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) + config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) + config.GKEHubFeatureBasePath = d.Get(GKEHubFeatureEndpointEntryKey).(string) + config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) + config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) + config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) + config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) + return config +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_resources.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_resources.go new file mode 100644 index 0000000000..d5a1ce78b3 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_resources.go @@ -0,0 +1,54 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO 
GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var dclResources = map[string]*schema.Resource{ + "google_apikeys_key": resourceApikeysKey(), + "google_assured_workloads_workload": resourceAssuredWorkloadsWorkload(), + "google_bigquery_reservation_assignment": resourceBigqueryReservationAssignment(), + "google_cloudbuild_worker_pool": resourceCloudbuildWorkerPool(), + "google_clouddeploy_delivery_pipeline": resourceClouddeployDeliveryPipeline(), + "google_clouddeploy_target": resourceClouddeployTarget(), + "google_compute_firewall_policy": resourceComputeFirewallPolicy(), + "google_compute_firewall_policy_association": resourceComputeFirewallPolicyAssociation(), + "google_compute_firewall_policy_rule": resourceComputeFirewallPolicyRule(), + "google_container_aws_cluster": resourceContainerAwsCluster(), + "google_container_aws_node_pool": resourceContainerAwsNodePool(), + "google_container_azure_client": resourceContainerAzureClient(), + "google_container_azure_cluster": resourceContainerAzureCluster(), + "google_container_azure_node_pool": resourceContainerAzureNodePool(), + "google_dataplex_asset": resourceDataplexAsset(), + "google_dataplex_lake": resourceDataplexLake(), + "google_dataplex_zone": resourceDataplexZone(), + 
"google_dataproc_workflow_template": resourceDataprocWorkflowTemplate(), + "google_eventarc_trigger": resourceEventarcTrigger(), + "google_firebaserules_release": resourceFirebaserulesRelease(), + "google_firebaserules_ruleset": resourceFirebaserulesRuleset(), + "google_gke_hub_feature": resourceGkeHubFeature(), + "google_gke_hub_feature_membership": resourceGkeHubFeatureMembership(), + "google_logging_log_view": resourceLoggingLogView(), + "google_monitoring_monitored_project": resourceMonitoringMonitoredProject(), + "google_network_connectivity_hub": resourceNetworkConnectivityHub(), + "google_network_connectivity_spoke": resourceNetworkConnectivitySpoke(), + "google_org_policy_policy": resourceOrgPolicyPolicy(), + "google_os_config_os_policy_assignment": resourceOsConfigOsPolicyAssignment(), + "google_privateca_certificate_template": resourcePrivatecaCertificateTemplate(), + "google_recaptcha_enterprise_key": resourceRecaptchaEnterpriseKey(), +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go index e69331f529..19c015751e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go @@ -128,17 +128,6 @@ var BigtableAdminCustomEndpointEntry = &schema.Schema{ }, DefaultBasePaths[BigtableAdminBasePathKey]), } -// GkeHubFeature uses a different base path "v1beta" than GkeHubMembership "v1beta1" -var GkeHubFeatureCustomEndpointEntryKey = "gkehub_feature_custom_endpoint" -var GkeHubFeatureCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GKEHUB_FEATURE_CUSTOM_ENDPOINT", - }, 
DefaultBasePaths[GkeHubFeatureBasePathKey]), -} - var PrivatecaCertificateTemplateEndpointEntryKey = "privateca_custom_endpoint" var PrivatecaCertificateTemplateCustomEndpointEntry = &schema.Schema{ Type: schema.TypeString, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go index 9b758d2761..19751500e3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_sweeper_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_sweeper_test.go index fb44435247..f6ebf5576a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_sweeper_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_sweeper_test.go @@ -61,7 +61,7 @@ func testSweepActiveDirectoryDomain(region string) error { }, } - listTemplate := strings.Split("https://managedidentities.googleapis.com/v1/projects/{{project}}/locations/global/domains", "?")[0] + listTemplate := strings.Split("https://managedidentities.googleapis.com/v1beta1/projects/{{project}}/locations/global/domains", "?")[0] listUrl, err := replaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -99,7 +99,7 @@ func testSweepActiveDirectoryDomain(region string) error { continue } - deleteTemplate := 
"https://managedidentities.googleapis.com/v1/projects/{{project}}/locations/global/domains/{{domain_name}}" + deleteTemplate := "https://managedidentities.googleapis.com/v1beta1/projects/{{project}}/locations/global/domains/{{domain_name}}" deleteUrl, err := replaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_trust_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_trust_test.go index 93947ce9cd..ad4c0f94c7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_trust_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_trust_test.go @@ -10,6 +10,9 @@ import ( ) func TestAccActiveDirectoryDomainTrust_activeDirectoryDomainTrustBasicExample(t *testing.T) { + // skip the test until Active Directory setup issue got resolved + t.Skip() + // This test continues to fail due to AD setup required // Skipping in VCR to allow for fully successful test runs skipIfVcr(t) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_update_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_update_test.go index c293cf8a2a..b91dfec872 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_update_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_update_test.go @@ -2,12 +2,17 @@ package google import ( "fmt" + "strings" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccActiveDirectoryDomain_update(t *testing.T) { + // skip the test until Active Directory setup issue got resolved + t.Skip() + t.Parallel() domain := fmt.Sprintf("tf-test%s.org1.com", randString(t, 5)) @@ -78,3 +83,36 @@ func testAccADDomainUpdate(context map[string]interface{}) string { `, context) } + +func testAccCheckActiveDirectoryDomainDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_active_directory_domain" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("ActiveDirectoryDomain still exists at %s", url) + } + } + + return nil + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering.go new file mode 100644 index 0000000000..08fb57c2cc --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering.go @@ -0,0 +1,374 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceActiveDirectoryPeering() *schema.Resource { + return &schema.Resource{ + Create: resourceActiveDirectoryPeeringCreate, + Read: resourceActiveDirectoryPeeringRead, + Update: resourceActiveDirectoryPeeringUpdate, + Delete: resourceActiveDirectoryPeeringDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "authorized_network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full names of the Google Compute Engine networks to which the instance is connected. Caller needs to make sure that CIDR subnets do not overlap between networks, else peering creation will fail.`, + }, + "domain_resource": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Full domain resource path for the Managed AD Domain involved in peering. 
The resource path should be in the form projects/{projectId}/locations/global/domains/{domainName}`, + }, + "peering_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: ``, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels that can contain user-provided metadata`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: `The current state of this Peering.`, + }, + "status_message": { + Type: schema.TypeString, + Optional: true, + Description: `Additional information about the current status of this peering, if available.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Unique name of the peering in this scope including projects and location using the form: projects/{projectId}/locations/global/peerings/{peeringId}.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceActiveDirectoryPeeringCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandActiveDirectoryPeeringLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + authorizedNetworkProp, err := expandActiveDirectoryPeeringAuthorizedNetwork(d.Get("authorized_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { + obj["authorizedNetwork"] = authorizedNetworkProp + } + domainResourceProp, err := 
expandActiveDirectoryPeeringDomainResource(d.Get("domain_resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("domain_resource"); !isEmptyValue(reflect.ValueOf(domainResourceProp)) && (ok || !reflect.DeepEqual(v, domainResourceProp)) { + obj["domainResource"] = domainResourceProp + } + statusMessageProp, err := expandActiveDirectoryPeeringStatusMessage(d.Get("status_message"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status_message"); !isEmptyValue(reflect.ValueOf(statusMessageProp)) && (ok || !reflect.DeepEqual(v, statusMessageProp)) { + obj["statusMessage"] = statusMessageProp + } + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/peerings?peeringId={{peering_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Peering: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Peering: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{peering_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = activeDirectoryOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Peering", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create 
+ d.SetId("") + return fmt.Errorf("Error waiting to create Peering: %s", err) + } + + if err := d.Set("name", flattenActiveDirectoryPeeringName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{peering_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Peering %q: %#v", d.Id(), res) + + return resourceActiveDirectoryPeeringRead(d, meta) +} + +func resourceActiveDirectoryPeeringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryPeering %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + + if err := d.Set("name", flattenActiveDirectoryPeeringName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + if err := d.Set("labels", flattenActiveDirectoryPeeringLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + if err := d.Set("authorized_network", flattenActiveDirectoryPeeringAuthorizedNetwork(res["authorizedNetwork"], d, 
config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + if err := d.Set("domain_resource", flattenActiveDirectoryPeeringDomainResource(res["domainResource"], d, config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + + return nil +} + +func resourceActiveDirectoryPeeringUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandActiveDirectoryPeeringLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + statusMessageProp, err := expandActiveDirectoryPeeringStatusMessage(d.Get("status_message"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status_message"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusMessageProp)) { + obj["statusMessage"] = statusMessageProp + } + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Peering %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Peering %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Peering %q: %#v", d.Id(), res) + } + + err = 
activeDirectoryOperationWaitTime( + config, res, project, "Updating Peering", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceActiveDirectoryPeeringRead(d, meta) +} + +func resourceActiveDirectoryPeeringDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/peerings/{{peering_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Peering %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Peering") + } + + err = activeDirectoryOperationWaitTime( + config, res, project, "Deleting Peering", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Peering %q: %#v", d.Id(), res) + return nil +} + +func flattenActiveDirectoryPeeringName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenActiveDirectoryPeeringLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenActiveDirectoryPeeringAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenActiveDirectoryPeeringDomainResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+ return v +} + +func expandActiveDirectoryPeeringLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandActiveDirectoryPeeringAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandActiveDirectoryPeeringDomainResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandActiveDirectoryPeeringStatusMessage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering_sweeper_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering_sweeper_test.go new file mode 100644 index 0000000000..54d2fc46e1 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering_sweeper_test.go @@ -0,0 +1,128 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("ActiveDirectoryPeering", &resource.Sweeper{ + Name: "ActiveDirectoryPeering", + F: testSweepActiveDirectoryPeering, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepActiveDirectoryPeering(region string) error { + resourceName := "ActiveDirectoryPeering" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://managedidentities.googleapis.com/v1beta1/projects/{{project}}/locations/global/peerings", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["peerings"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + 
log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://managedidentities.googleapis.com/v1beta1/projects/{{project}}/locations/global/peerings/{{peering_id}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go index a395421e3b..c7966e0433 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go @@ -210,7 +210,7 @@ func resourceApigeeInstanceCreate(d *schema.ResourceData, meta interface{}) erro billingProject = bp } - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isApigeeRetryableError) if err != nil { return fmt.Errorf("Error creating Instance: %s", err) } @@ -269,7 +269,7 @@ func resourceApigeeInstanceRead(d *schema.ResourceData, meta interface{}) error billingProject = bp } - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isApigeeRetryableError) if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("ApigeeInstance %q", d.Id())) } @@ -337,7 +337,7 @@ func resourceApigeeInstanceDelete(d *schema.ResourceData, meta interface{}) erro billingProject = bp } - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isApigeeRetryableError) if err != nil { return handleNotFoundError(err, d, "Instance") } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance_generated_test.go index cfb80ac397..210a472bad 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance_generated_test.go @@ -675,7 +675,7 @@ func 
testAccCheckApigeeInstanceDestroyProducer(t *testing.T) func(s *terraform.S billingProject = config.BillingProject } - _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil, isApigeeRetryableError) if err == nil { return fmt.Errorf("ApigeeInstance still exists at %s", url) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go index 6e756c074a..e847d3bbf0 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go @@ -49,7 +49,7 @@ func resourceApikeysKey() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: [a-z]([a-z0-9-]{0,61}[a-z0-9])?.", + Description: "The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", }, "display_name": { @@ -81,6 +81,12 @@ func resourceApikeysKey() *schema.Resource { Sensitive: true, Description: "Output only. An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method.", }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Unique id in UUID4 format.", + }, }, } } @@ -233,12 +239,12 @@ func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/keys/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -255,7 +261,7 @@ func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { } else { client.Config.BasePath = bp } - res, err := client.ApplyKey(context.Background(), obj, createDirective...) + res, err := client.ApplyKey(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -321,6 +327,9 @@ func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { if err = d.Set("key_string", res.KeyString); err != nil { return fmt.Errorf("error setting key_string in state: %s", err) } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } return nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key_generated_test.go index 1629cd9bf2..0baf17d35d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key_generated_test.go @@ -462,6 +462,7 @@ func testAccCheckApikeysKeyDestroyProducer(t *testing.T) func(s *terraform.State DisplayName: 
dcl.String(rs.Primary.Attributes["display_name"]), Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), KeyString: dcl.StringOrNil(rs.Primary.Attributes["key_string"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), } client := NewDCLApikeysClient(config, config.userAgent, billingProject, 0) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_artifact_registry_repository_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_artifact_registry_repository_generated_test.go index 1b071b9259..cab7443672 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_artifact_registry_repository_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_artifact_registry_repository_generated_test.go @@ -32,7 +32,7 @@ func TestAccArtifactRegistryRepository_artifactRegistryRepositoryBasicExample(t vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), Steps: []resource.TestStep{ { @@ -51,12 +51,10 @@ func TestAccArtifactRegistryRepository_artifactRegistryRepositoryBasicExample(t func testAccArtifactRegistryRepository_artifactRegistryRepositoryBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository%{random_suffix}" - format = "DOCKER" + description = "example docker repository%{random_suffix}" + format = "DOCKER" } `, context) } @@ -71,7 +69,7 @@ func TestAccArtifactRegistryRepository_artifactRegistryRepositoryCmekExample(t * vcrTest(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), Steps: []resource.TestStep{ { @@ -90,67 +88,11 @@ func TestAccArtifactRegistryRepository_artifactRegistryRepositoryCmekExample(t * func testAccArtifactRegistryRepository_artifactRegistryRepositoryCmekExample(context map[string]interface{}) string { return Nprintf(` resource "google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" - repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository with cmek" - format = "DOCKER" - kms_key_name = "%{kms_key_name}" -} -`, context) -} - -func TestAccArtifactRegistryRepository_artifactRegistryRepositoryIamExample(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": randString(t, 10), - } - - vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, - CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccArtifactRegistryRepository_artifactRegistryRepositoryIamExample(context), - }, - { - ResourceName: "google_artifact_registry_repository.my-repo", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"repository_id", "location"}, - }, - }, - }) -} - -func testAccArtifactRegistryRepository_artifactRegistryRepositoryIamExample(context map[string]interface{}) string { - return Nprintf(` -resource "google_artifact_registry_repository" "my-repo" { - provider = google-beta - - location = "us-central1" + location = "us-central1" repository_id = "tf-test-my-repository%{random_suffix}" - description = "example docker repository with iam%{random_suffix}" - format = "DOCKER" -} - -resource "google_service_account" "test-account" { - provider = google-beta - - account_id = "tf-test-my-account%{random_suffix}" - 
display_name = "Test Service Account" -} - -resource "google_artifact_registry_repository_iam_member" "test-iam" { - provider = google-beta - - location = google_artifact_registry_repository.my-repo.location - repository = google_artifact_registry_repository.my-repo.name - role = "roles/artifactregistry.reader" - member = "serviceAccount:${google_service_account.test-account.email}" + description = "example docker repository with cmek" + format = "DOCKER" + kms_key_name = "%{kms_key_name}" } `, context) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go index 0e95583e91..e865bb1e9a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go @@ -207,12 +207,12 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), } - id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -229,7 +229,7 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa } else { client.Config.BasePath = bp } - res, err := client.ApplyWorkload(context.Background(), obj, createDirective...) + res, err := client.ApplyWorkload(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -242,10 +242,11 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + // ID has a server-generated value, set again after creation. + + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_generated_test.go index 76ccf60498..28574f5a67 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_generated_test.go @@ -23,6 +23,47 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) +func TestAccBigqueryConnectionConnection_bigqueryConnectionCloudResourceExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + CheckDestroy: testAccCheckBigqueryConnectionConnectionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigqueryConnectionConnection_bigqueryConnectionCloudResourceExample(context), + }, + { + ResourceName: 
"google_bigquery_connection.connection", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + }, + }) +} + +func testAccBigqueryConnectionConnection_bigqueryConnectionCloudResourceExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_bigquery_connection" "connection" { + connection_id = "tf-test-my-connection%{random_suffix}" + location = "US" + friendly_name = "👋" + description = "a riveting description" + cloud_resource {} +} +`, context) +} + func TestAccBigqueryConnectionConnection_bigqueryConnectionBasicExample(t *testing.T) { skipIfVcr(t) t.Parallel() @@ -178,47 +219,6 @@ resource "google_bigquery_connection" "connection" { `, context) } -func TestAccBigqueryConnectionConnection_bigqueryConnectionCloudResourceExample(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": randString(t, 10), - } - - vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - ExternalProviders: map[string]resource.ExternalProvider{ - "random": {}, - "time": {}, - }, - CheckDestroy: testAccCheckBigqueryConnectionConnectionDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigqueryConnectionConnection_bigqueryConnectionCloudResourceExample(context), - }, - { - ResourceName: "google_bigquery_connection.connection", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location"}, - }, - }, - }) -} - -func testAccBigqueryConnectionConnection_bigqueryConnectionCloudResourceExample(context map[string]interface{}) string { - return Nprintf(` -resource "google_bigquery_connection" "connection" { - connection_id = "tf-test-my-connection%{random_suffix}" - location = "US" - friendly_name = "👋" - description = "a riveting description" - cloud_resource {} -} -`, context) -} - func TestAccBigqueryConnectionConnection_bigqueryConnectionAwsExample(t *testing.T) { t.Parallel() diff 
--git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_test.go index de4b060891..df13f7672b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_connection_test.go @@ -106,7 +106,6 @@ resource "google_sql_database_instance" "instance" { } resource "google_sql_database" "db" { - provider = google-beta instance = google_sql_database_instance.instance.name name = "db2" } @@ -117,14 +116,12 @@ resource "random_password" "pwd" { } resource "google_sql_user" "user" { - provider = google-beta name = "username" instance = google_sql_database_instance.instance.name password = random_password.pwd.result } resource "google_bigquery_connection" "connection" { - provider = google-beta connection_id = "tf-test-my-connection%{random_suffix}" location = "US" friendly_name = "👋👋" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go index 64cc4dab57..9f51dec6b7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go @@ -67,7 +67,6 @@ func resourceBigqueryDataTransferConfig() *schema.Resource { "display_name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `The user specified display name for the transfer config.`, }, "params": { @@ -75,7 +74,9 @@ func resourceBigqueryDataTransferConfig() *schema.Resource { Required: true, 
Description: `Parameters specific to each data source. For more information see the bq tab in the 'Setting up a data transfer' section for each data source. For example the parameters for Cloud Storage transfers are listed here: -https://cloud.google.com/bigquery-transfer/docs/cloud-storage-transfer#bq`, +https://cloud.google.com/bigquery-transfer/docs/cloud-storage-transfer#bq + +**NOTE** : If you are attempting to update a parameter that cannot be updated (due to api limitations) [please force recreation of the resource](https://www.terraform.io/cli/state/taint#forcing-re-creation-of-resources).`, Elem: &schema.Schema{Type: schema.TypeString}, }, "data_refresh_window_days": { @@ -458,6 +459,12 @@ func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta inter billingProject = project obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) if err != nil { return err @@ -520,6 +527,10 @@ func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta inter log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) updateMask := []string{} + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + if d.HasChange("destination_dataset_id") { updateMask = append(updateMask, "destinationDatasetId") } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config_test.go index 2c0ab80538..90feba9124 100644 
--- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config_test.go @@ -47,7 +47,7 @@ func testAccBigqueryDataTransferConfig_scheduledQuery_basic(t *testing.T) { CheckDestroy: testAccCheckBigqueryDataTransferConfigDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, "third", start_time, end_time, "y"), + Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, random_suffix, "third", start_time, end_time, "y"), }, { ResourceName: "google_bigquery_data_transfer_config.query_config", @@ -68,6 +68,7 @@ func testAccBigqueryDataTransferConfig_scheduledQuery_update(t *testing.T) { first_end_time := now.AddDate(0, 1, 0).Format(time.RFC3339) second_start_time := now.Add(2 * time.Hour).Format(time.RFC3339) second_end_time := now.AddDate(0, 2, 0).Format(time.RFC3339) + random_suffix2 := randString(t, 10) vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -75,10 +76,19 @@ func testAccBigqueryDataTransferConfig_scheduledQuery_update(t *testing.T) { CheckDestroy: testAccCheckBigqueryDataTransferConfigDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, "first", first_start_time, first_end_time, "y"), + Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, random_suffix, "first", first_start_time, first_end_time, "y"), }, { - Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, "second", second_start_time, second_end_time, "z"), + Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, random_suffix, "second", second_start_time, second_end_time, "z"), + }, + { + ResourceName: "google_bigquery_data_transfer_config.query_config", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + { + Config: testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, random_suffix2, "second", second_start_time, second_end_time, "z"), }, { ResourceName: "google_bigquery_data_transfer_config.query_config", @@ -185,7 +195,7 @@ func testAccCheckBigqueryDataTransferConfigDestroyProducer(t *testing.T) func(s } } -func testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, schedule, start_time, end_time, letter string) string { +func testAccBigqueryDataTransferConfig_scheduledQuery(random_suffix, random_suffix2, schedule, start_time, end_time, letter string) string { return fmt.Sprintf(` data "google_project" "project" {} @@ -233,7 +243,7 @@ resource "google_bigquery_data_transfer_config" "query_config" { query = "SELECT name FROM tabl WHERE x = '%s'" } } -`, random_suffix, random_suffix, random_suffix, schedule, start_time, end_time, letter) +`, random_suffix, random_suffix, random_suffix2, schedule, start_time, end_time, letter) } func testAccBigqueryDataTransferConfig_scheduledQuery_service_account(random_suffix string) string { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go index 3d9f0e0697..df9c610c6e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go @@ -53,19 +53,19 @@ func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *s } if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } if memberInState := 
d.Get("group_by_email").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } if memberInState := d.Get("domain").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } if memberInState := d.Get("special_group").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go index ecc2be77b1..2a58dbe7aa 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go @@ -664,6 +664,7 @@ The BigQuery Service Account associated with your project requires access to thi }, "destination_table": { Type: schema.TypeList, + Computed: true, Optional: true, ForceNew: true, Description: `Describes the table where the query results should be stored. 
diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go index 87828ad307..110d8758e2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go @@ -118,7 +118,7 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -135,7 +135,7 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in } else { client.Config.BasePath = bp } - res, err := client.ApplyAssignment(context.Background(), obj, createDirective...) + res, err := client.ApplyAssignment(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -145,7 +145,12 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in return fmt.Errorf("Error creating Assignment: %s", err) } - id, err = obj.ID() + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. 
+ + id, err = res.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go index f6440c7435..81fec4d44d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go @@ -521,6 +521,7 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch for i, e := range orderedClusters { if e == nil { orderedClusters[i] = elem + break } } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance_test.go index 39ed5256dd..50620311d6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance_test.go @@ -96,6 +96,15 @@ func TestAccBigtableInstance_cluster(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back }, + { + Config: testAccBigtableInstance_clusterModifiedAgain(instanceName, 5), + }, + { + ResourceName: "google_bigtable_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back + }, }, }) } @@ -478,6 +487,51 @@ resource "google_bigtable_instance" "instance" { `, instanceName, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes) } +// Add two clusters after 
testAccBigtableInstance_clusterModified. +func testAccBigtableInstance_clusterModifiedAgain(instanceName string, numNodes int) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + cluster { + cluster_id = "%s-c" + zone = "us-central1-c" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-a" + zone = "us-central1-a" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-b" + zone = "us-central1-b" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-asia-a" + zone = "asia-northeast1-a" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-asia-b" + zone = "asia-northeast1-b" + num_nodes = %d + storage_type = "HDD" + } + + deletion_protection = false + + labels = { + env = "default" + } +} +`, instanceName, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes) +} + func testAccBigtableInstance_development(instanceName string) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go index 8b424d7c1f..d3acfbd5ec 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go @@ -276,7 +276,7 @@ func flattenColumnFamily(families []string) []map[string]interface{} { return result } -//TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 +// TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := 
parseImportId([]string{ diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go index f3bc607aac..612a4c4aad 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go @@ -26,6 +26,17 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) +// Check to see if a specified value in the config exists and suppress diffs if so. Otherwise run emptyOrDefaultStringSuppress. + +func checkValAndDefaultStringSuppress(defaultVal string, checkVal string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + if _, ok := d.GetOkExists(checkVal); ok { + return false + } + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) + } +} + func resourceBillingBudget() *schema.Resource { return &schema.Resource{ Create: resourceBillingBudgetCreate, @@ -114,31 +125,6 @@ is "USD", then 1 unit is one US dollar.`, ForceNew: true, Description: `ID of the billing account to set a budget on.`, }, - "threshold_rules": { - Type: schema.TypeList, - Required: true, - Description: `Rules that trigger alerts (notifications of thresholds being -crossed) when spend exceeds the specified percentages of the -budget.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "threshold_percent": { - Type: schema.TypeFloat, - Required: true, - Description: `Send an alert when this threshold is exceeded. This is a -1.0-based percentage, so 0.5 = 50%. 
Must be >= 0.`, - }, - "spend_basis": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}), - Description: `The type of basis used to determine if spend has passed -the threshold. Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, - Default: "CURRENT_SPEND", - }, - }, - }, - }, "all_updates_rule": { Type: schema.TypeList, Optional: true, @@ -164,6 +150,7 @@ Account Users IAM roles for the target account.`, channel in the form projects/{project_id}/notificationChannels/{channel_id}. A maximum of 5 channels are allowed.`, + MaxItems: 5, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -198,20 +185,29 @@ spend against the budget.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "calendar_period": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"MONTH", "QUARTER", "YEAR", "CALENDAR_PERIOD_UNSPECIFIED", ""}), + DiffSuppressFunc: checkValAndDefaultStringSuppress("MONTH", "budget_filter.0.custom_period.0.start_date"), + Description: `A CalendarPeriod represents the abstract concept of a recurring time period that has a +canonical start. Grammatically, "the start of the current CalendarPeriod". +All calendar times begin at 12 AM US and Canadian Pacific Time (UTC-8). + +Exactly one of 'calendar_period', 'custom_period' must be provided. 
Possible values: ["MONTH", "QUARTER", "YEAR", "CALENDAR_PERIOD_UNSPECIFIED"]`, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, + }, "credit_types": { Type: schema.TypeList, Computed: true, Optional: true, - Description: `A set of subaccounts of the form billingAccounts/{account_id}, -specifying that usage from only this set of subaccounts should -be included in the budget. If a subaccount is set to the name of -the parent account, usage from the parent account will be included. -If the field is omitted, the report will include usage from the parent -account and all subaccounts, if they exist.`, + Description: `Optional. If creditTypesTreatment is INCLUDE_SPECIFIED_CREDITS, +this is a list of credit types to be subtracted from gross cost to determine the spend for threshold calculations. See a list of acceptable credit type values. +If creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be empty.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "credit_types_treatment": { Type: schema.TypeString, @@ -220,7 +216,78 @@ account and all subaccounts, if they exist.`, Description: `Specifies how credits should be treated when determining spend for threshold calculations. 
Default value: "INCLUDE_ALL_CREDITS" Possible values: ["INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS"]`, Default: "INCLUDE_ALL_CREDITS", - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, + }, + "custom_period": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies to track usage from any start date (required) to any end date (optional). +This time period is static, it does not recur. + +Exactly one of 'calendar_period', 'custom_period' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_date": { + Type: schema.TypeList, + Required: true, + Description: `A start date is required. The start date must be after January 1, 2017.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month.`, + }, + "month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 12), + Description: `Month of a year. Must be from 1 to 12.`, + }, + "year": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999.`, + }, + }, + }, + }, + "end_date": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. The end date of the time period. Budgets with elapsed end date won't be processed. 
+If unset, specifies to track all usage incurred since the startDate.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month.`, + }, + "month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 12), + Description: `Month of a year. Must be from 1 to 12.`, + }, + "year": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999.`, + }, + }, + }, + }, + }, + }, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "labels": { Type: schema.TypeMap, @@ -229,7 +296,7 @@ for threshold calculations. 
Default value: "INCLUDE_ALL_CREDITS" Possible values Description: `A single label and value pair specifying that usage from only this set of labeled resources should be included in the budget.`, Elem: &schema.Schema{Type: schema.TypeString}, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "projects": { Type: schema.TypeSet, @@ -243,7 +310,7 @@ the usage occurred on.`, Type: schema.TypeString, }, Set: schema.HashString, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "services": { Type: schema.TypeList, @@ -258,7 +325,7 @@ https://cloud.google.com/billing/v1/how-tos/catalog-api.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "subaccounts": { Type: schema.TypeList, @@ -273,7 +340,7 @@ account and all subaccounts, if they exist.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - 
AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, }, }, @@ -283,6 +350,31 @@ account and all subaccounts, if they exist.`, Optional: true, Description: `User data for display name in UI. Must be <= 60 chars.`, }, + "threshold_rules": { + Type: schema.TypeList, + Optional: true, + Description: `Rules that trigger alerts (notifications of thresholds being +crossed) when spend exceeds the specified percentages of the +budget.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threshold_percent": { + Type: schema.TypeFloat, + Required: true, + Description: `Send an alert when this threshold is exceeded. This is a +1.0-based percentage, so 0.5 = 50%. Must be >= 0.`, + }, + "spend_basis": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}), + Description: `The type of basis used to determine if spend has passed +the threshold. 
Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, + Default: "CURRENT_SPEND", + }, + }, + }, + }, "name": { Type: schema.TypeString, Computed: true, @@ -468,7 +560,10 @@ func resourceBillingBudgetUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("budget_filter") { updateMask = append(updateMask, "budgetFilter.projects", - "budgetFilter.labels") + "budgetFilter.labels", + "budgetFilter.calendarPeriod", + "budgetFilter.customPeriod", + "budgetFilter.services") } if d.HasChange("amount") { @@ -593,6 +688,10 @@ func flattenBillingBudgetBudgetFilter(v interface{}, d *schema.ResourceData, con flattenBillingBudgetBudgetFilterSubaccounts(original["subaccounts"], d, config) transformed["labels"] = flattenBillingBudgetBudgetFilterLabels(original["labels"], d, config) + transformed["calendar_period"] = + flattenBillingBudgetBudgetFilterCalendarPeriod(original["calendarPeriod"], d, config) + transformed["custom_period"] = + flattenBillingBudgetBudgetFilterCustomPeriod(original["customPeriod"], d, config) return []interface{}{transformed} } func flattenBillingBudgetBudgetFilterProjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -643,6 +742,161 @@ func flattenBillingBudgetBudgetFilterLabels(v interface{}, d *schema.ResourceDat return transformed } +func flattenBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenBillingBudgetBudgetFilterCustomPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_date"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDate(original["startDate"], d, config) + transformed["end_date"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDate(original["endDate"], d, config) + 
return []interface{}{transformed} +} +func flattenBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(original["year"], d, config) + transformed["month"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(original["month"], d, config) + transformed["day"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented 
as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(original["year"], d, config) + transformed["month"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(original["month"], d, config) + transformed["day"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, 
err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenBillingBudgetAmount(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -825,6 +1079,20 @@ func expandBillingBudgetBudgetFilter(v interface{}, d TerraformResourceData, con transformed["labels"] = transformedLabels } + transformedCalendarPeriod, err := expandBillingBudgetBudgetFilterCalendarPeriod(original["calendar_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCalendarPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["calendarPeriod"] = transformedCalendarPeriod + } + + transformedCustomPeriod, err := expandBillingBudgetBudgetFilterCustomPeriod(original["custom_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["customPeriod"] = transformedCustomPeriod + } + return transformed, nil } @@ -860,6 +1128,126 @@ func expandBillingBudgetBudgetFilterLabels(v interface{}, d TerraformResourceDat return m, nil } +func expandBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartDate, err := expandBillingBudgetBudgetFilterCustomPeriodStartDate(original["start_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartDate); 
val.IsValid() && !isEmptyValue(val) { + transformed["startDate"] = transformedStartDate + } + + transformedEndDate, err := expandBillingBudgetBudgetFilterCustomPeriodEndDate(original["end_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndDate); val.IsValid() && !isEmptyValue(val) { + transformed["endDate"] = transformedEndDate + } + + return transformed, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func 
expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandBillingBudgetAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == 
nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_generated_test.go index a7b433edaa..22fee89457 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_generated_test.go @@ -268,6 +268,136 @@ resource "google_monitoring_notification_channel" "notification_channel" { `, context) } +func TestAccBillingBudget_billingBudgetCustomperiodExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": getTestBillingAccountFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBillingBudgetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBillingBudget_billingBudgetCustomperiodExample(context), + }, + { + ResourceName: "google_billing_budget.budget", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account"}, + }, + }, + }) +} + +func testAccBillingBudget_billingBudgetCustomperiodExample(context map[string]interface{}) string { + return Nprintf(` +data "google_billing_account" "account" { + billing_account = "%{billing_acct}" +} + +data "google_project" "project" { +} + +resource "google_billing_budget" "budget" { + billing_account = data.google_billing_account.account.id + display_name = "Example Billing Budget%{random_suffix}" + + budget_filter { + projects = ["projects/${data.google_project.project.number}"] + credit_types_treatment = "EXCLUDE_ALL_CREDITS" + services = ["services/24E6-581D-38E5"] # Bigquery + + custom_period { + start_date { + year = 2022 + month = 1 + day = 1 + 
} + end_date { + year = 2023 + month = 12 + day = 31 + } + } + } + + amount { + specified_amount { + currency_code = "USD" + units = "100000" + } + } + + threshold_rules { + threshold_percent = 0.5 + } + threshold_rules { + threshold_percent = 0.9 + } +} +`, context) +} + +func TestAccBillingBudget_billingBudgetOptionalExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": getTestBillingAccountFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBillingBudgetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBillingBudget_billingBudgetOptionalExample(context), + }, + { + ResourceName: "google_billing_budget.budget", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account"}, + }, + }, + }) +} + +func testAccBillingBudget_billingBudgetOptionalExample(context map[string]interface{}) string { + return Nprintf(` +data "google_billing_account" "account" { + billing_account = "%{billing_acct}" +} + +resource "google_billing_budget" "budget" { + billing_account = data.google_billing_account.account.id + display_name = "Example Billing Budget%{random_suffix}" + + amount { + specified_amount { + currency_code = "USD" + units = "100000" + } + } + + all_updates_rule { + disable_default_iam_recipients = true + pubsub_topic = google_pubsub_topic.budget.id + } +} + +resource "google_pubsub_topic" "budget" { + name = "tf-test-example-topic%{random_suffix}" +} +`, context) +} + func testAccCheckBillingBudgetDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_test.go index 7461515c5f..0f60bc7b34 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget_test.go @@ -94,6 +94,14 @@ func TestAccBillingBudget_billingBudgetUpdate(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccBillingBudget_billingBudgetCalendarUpdate(context), + }, + { + ResourceName: "google_billing_budget.budget", + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccBillingBudget_billingBudgetUpdate(context), }, @@ -102,6 +110,14 @@ func TestAccBillingBudget_billingBudgetUpdate(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccBillingBudget_billingBudgetCustomPeriodUpdate(context), + }, + { + ResourceName: "google_billing_budget.budget", + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -225,6 +241,7 @@ resource "google_billing_budget" "budget" { labels = { label1 = "bar2" } + services = ["services/24E6-581D-38E5"] # Bigquery } amount { @@ -249,6 +266,113 @@ resource "google_billing_budget" "budget" { `, context) } +func testAccBillingBudget_billingBudgetCalendarUpdate(context map[string]interface{}) string { + return Nprintf(` +resource "google_pubsub_topic" "topic1" { + name = "tf-test-billing-budget1-%{random_suffix}" +} +resource "google_pubsub_topic" "topic2" { + name = "tf-test-billing-budget2-%{random_suffix}" +} +data "google_billing_account" "account" { + billing_account = "%{billing_acct}" +} + +data "google_project" "project" { +} + +resource "google_billing_budget" "budget" { + billing_account = data.google_billing_account.account.id + display_name = "Example Billing Budget%{random_suffix}" + + budget_filter { + projects = [] + labels = { + label1 = "bar2" + } + calendar_period = 
"YEAR" + } + + amount { + specified_amount { + currency_code = "USD" + units = "2000" + } + } + + threshold_rules { + threshold_percent = 0.5 + } + threshold_rules { + threshold_percent = 0.9 + } + + all_updates_rule { + pubsub_topic = google_pubsub_topic.topic2.id + } +} +`, context) +} + +func testAccBillingBudget_billingBudgetCustomPeriodUpdate(context map[string]interface{}) string { + return Nprintf(` +resource "google_pubsub_topic" "topic1" { + name = "tf-test-billing-budget1-%{random_suffix}" +} +resource "google_pubsub_topic" "topic2" { + name = "tf-test-billing-budget2-%{random_suffix}" +} +data "google_billing_account" "account" { + billing_account = "%{billing_acct}" +} + +data "google_project" "project" { +} + +resource "google_billing_budget" "budget" { + billing_account = data.google_billing_account.account.id + display_name = "Example Billing Budget%{random_suffix}" + + budget_filter { + projects = [] + labels = { + label1 = "bar2" + } + custom_period { + start_date { + year = 2022 + month = 1 + day = 1 + } + end_date { + year = 2023 + month = 12 + day = 31 + } + } + } + + amount { + specified_amount { + currency_code = "USD" + units = "2000" + } + } + + threshold_rules { + threshold_percent = 0.5 + } + threshold_rules { + threshold_percent = 0.9 + } + + all_updates_rule { + pubsub_topic = google_pubsub_topic.topic2.id + } +} +`, context) +} + func TestBillingBudgetStateUpgradeV0(t *testing.T) { t.Parallel() diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go index 515ac34931..d4d8ecfee9 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go @@ -65,7 
+65,7 @@ and all following characters must be a dash, underscore, letter or digit.`, "labels": { Type: schema.TypeMap, Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, + Description: `Set of label tags associated with the Certificate resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "managed": { @@ -97,10 +97,63 @@ Wildcard domains are only supported with DNS challenge resolution`, Type: schema.TypeString, }, }, + "authorization_attempt_info": { + Type: schema.TypeList, + Computed: true, + Description: `Detailed state of the latest authorization attempt for each domain +specified for this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation for reaching the state. Provided to help +address the configuration issues. +Not guaranteed to be stable. For programmatic access use 'failure_reason' field.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `Domain name of the authorization attempt.`, + }, + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for failure of the authorization attempt for the domain.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the domain for managed certificate issuance.`, + }, + }, + }, + }, + "provisioning_issue": { + Type: schema.TypeList, + Computed: true, + Description: `Information about issues with provisioning this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation about the issue. Provided to help address +the configuration issues. +Not guaranteed to be stable. 
For programmatic access use 'reason' field.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for provisioning failures.`, + }, + }, + }, + }, "state": { Type: schema.TypeString, Computed: true, - Description: `State of the managed certificate resource.`, + Description: `A state of this Managed Certificate.`, }, }, }, @@ -110,16 +163,15 @@ Wildcard domains are only supported with DNS challenge resolution`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DEFAULT", "EDGE_CACHE", ""}), DiffSuppressFunc: certManagerDefaultScopeDiffSuppress, Description: `The scope of the certificate. -Certificates with default scope are served from core Google data centers. +DEFAULT: Certificates with default scope are served from core Google data centers. If unsure, choose this option. -Certificates with scope EDGE_CACHE are special-purposed certificates, +EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from non-core Google data centers. -Currently allowed only for managed certificates. 
Default value: "DEFAULT" Possible values: ["DEFAULT", "EDGE_CACHE"]`, +Currently allowed only for managed certificates.`, Default: "DEFAULT", }, "self_managed": { @@ -483,24 +535,88 @@ func flattenCertificateManagerCertificateManaged(v interface{}, d *schema.Resour return nil } transformed := make(map[string]interface{}) - transformed["state"] = - flattenCertificateManagerCertificateManagedState(original["state"], d, config) transformed["domains"] = flattenCertificateManagerCertificateManagedDomains(original["domains"], d, config) transformed["dns_authorizations"] = flattenCertificateManagerCertificateManagedDnsAuthorizations(original["dnsAuthorizations"], d, config) + transformed["state"] = + flattenCertificateManagerCertificateManagedState(original["state"], d, config) + transformed["provisioning_issue"] = + flattenCertificateManagerCertificateManagedProvisioningIssue(original["provisioningIssue"], d, config) + transformed["authorization_attempt_info"] = + flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorizationAttemptInfo"], d, config) return []interface{}{transformed} } +func flattenCertificateManagerCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return d.Get("managed.0.dns_authorizations") +} + func flattenCertificateManagerCertificateManagedState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } -func flattenCertificateManagerCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["reason"] = + flattenCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) + transformed["details"] = + flattenCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } -func flattenCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("managed.0.dns_authorizations") +func flattenCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "domain": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config), + "state": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config), + "failure_reason": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failureReason"], d, config), + "details": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v } func expandCertificateManagerCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { @@ -565,6 +681,20 @@ func expandCertificateManagerCertificateManaged(v interface{}, d TerraformResour original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformedDomains, err := expandCertificateManagerCertificateManagedDomains(original["domains"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { + transformed["domains"] = transformedDomains + } + + transformedDnsAuthorizations, err := expandCertificateManagerCertificateManagedDnsAuthorizations(original["dns_authorizations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDnsAuthorizations); val.IsValid() && !isEmptyValue(val) { + transformed["dnsAuthorizations"] = transformedDnsAuthorizations + } + transformedState, err := expandCertificateManagerCertificateManagedState(original["state"], d, config) if err != nil { return nil, err @@ -572,31 +702,124 @@ func expandCertificateManagerCertificateManaged(v interface{}, d TerraformResour transformed["state"] = transformedState } - transformedDomains, err := expandCertificateManagerCertificateManagedDomains(original["domains"], d, config) + transformedProvisioningIssue, err := expandCertificateManagerCertificateManagedProvisioningIssue(original["provisioning_issue"], d, config) if err 
!= nil { return nil, err - } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { - transformed["domains"] = transformedDomains + } else if val := reflect.ValueOf(transformedProvisioningIssue); val.IsValid() && !isEmptyValue(val) { + transformed["provisioningIssue"] = transformedProvisioningIssue } - transformedDnsAuthorizations, err := expandCertificateManagerCertificateManagedDnsAuthorizations(original["dns_authorizations"], d, config) + transformedAuthorizationAttemptInfo, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorization_attempt_info"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDnsAuthorizations); val.IsValid() && !isEmptyValue(val) { - transformed["dnsAuthorizations"] = transformedDnsAuthorizations + } else if val := reflect.ValueOf(transformedAuthorizationAttemptInfo); val.IsValid() && !isEmptyValue(val) { + transformed["authorizationAttemptInfo"] = transformedAuthorizationAttemptInfo } return transformed, nil } +func expandCertificateManagerCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandCertificateManagerCertificateManagedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandCertificateManagerCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + 
+ transformedReason, err := expandCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReason); val.IsValid() && !isEmptyValue(val) { + transformed["reason"] = transformedReason + } + + transformedDetails, err := expandCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !isEmptyValue(val) { + transformed["details"] = transformedDetails + } + + return transformed, nil +} + +func expandCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomain, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { + transformed["domain"] = transformedDomain + } + + transformedState, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedFailureReason, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failure_reason"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureReason); val.IsValid() && !isEmptyValue(val) { + transformed["failureReason"] = transformedFailureReason + } + + transformedDetails, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !isEmptyValue(val) { + transformed["details"] = transformedDetails + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map.go new file mode 100644 index 0000000000..cb8d7098d7 --- /dev/null +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map.go @@ -0,0 +1,478 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceCertificateManagerCertificateMap() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateMapCreate, + Read: resourceCertificateManagerCertificateMapRead, + Update: resourceCertificateManagerCertificateMapUpdate, + Delete: resourceCertificateManagerCertificateMapDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateMapImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the Certificate Map. 
Certificate Map names must be unique +globally and match the pattern 'projects/*/locations/*/certificateMaps/*'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Set of labels associated with a Certificate Map resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "gclb_targets": { + Type: schema.TypeList, + Computed: true, + Description: `A list of target proxies that use this Certificate Map`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_configs": { + Type: schema.TypeList, + Optional: true, + Description: `An IP configuration where this Certificate Map is serving`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: `An external IP address`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `A list of ports`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "target_https_proxy": { + Type: schema.TypeString, + Optional: true, + Description: `Proxy name must be in the format projects/*/locations/*/targetHttpsProxies/*. +This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or +'targetSslProxy' may be set.`, + }, + "target_ssl_proxy": { + Type: schema.TypeString, + Optional: true, + Description: `Proxy name must be in the format projects/*/locations/*/targetSslProxies/*. 
+This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or +'targetSslProxy' may be set.`, + }, + }, + }, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateMapCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps?certificateMapId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateMap: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating CertificateMap: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = certificateManagerOperationWaitTime( + config, res, project, "Creating CertificateMap", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateMap: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateMap %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateMapRead(d, meta) +} + +func resourceCertificateManagerCertificateMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMap %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + 
+ if err := d.Set("description", flattenCertificateManagerCertificateMapDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateMapCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateMapUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateMapLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("gclb_targets", flattenCertificateManagerCertificateMapGclbTargets(res["gclbTargets"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateMapUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CertificateMap %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating CertificateMap %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CertificateMap %q: %#v", d.Id(), res) + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Updating CertificateMap", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateMapRead(d, meta) +} + +func resourceCertificateManagerCertificateMapDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != 
nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateMap %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "CertificateMap") + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateMap", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CertificateMap %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateMapImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateMapDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCertificateManagerCertificateMapGclbTargets(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_configs": flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(original["ipConfigs"], d, config), + "target_https_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(original["targetHttpsProxy"], d, config), + "target_ssl_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(original["targetSslProxy"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_address": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(original["ipAddress"], d, config), + "ports": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(original["ports"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandCertificateManagerCertificateMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry.go new file mode 100644 index 0000000000..99f16ffcaf --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry.go @@ -0,0 +1,523 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceCertificateManagerCertificateMapEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateMapEntryCreate, + Read: resourceCertificateManagerCertificateMapEntryRead, + Update: resourceCertificateManagerCertificateMapEntryUpdate, + Delete: resourceCertificateManagerCertificateMapEntryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateMapEntryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "certificates": { + Type: schema.TypeList, + Required: true, + DiffSuppressFunc: projectNumberDiffSuppress, + Description: `A set of Certificates defines for the given hostname. +There can be defined up to fifteen certificates in each Certificate Map Entry. +Each certificate must match pattern projects/*/locations/*/certificates/*.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "map": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `A map entry that is inputted into the certificate map`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the Certificate Map Entry. 
Certificate Map Entry +names must be unique globally and match pattern +'projects/*/locations/*/certificateMaps/*/certificateMapEntries/*'`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "hostname": { + Type: schema.TypeString, + Optional: true, + Description: `A Hostname (FQDN, e.g. example.com) or a wildcard hostname expression (*.example.com) +for a set of hostnames with common suffix. Used as Server Name Indication (SNI) for +selecting a proper certificate.`, + ExactlyOneOf: []string{"hostname", "matcher"}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Set of labels associated with a Certificate Map Entry. +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "matcher": { + Type: schema.TypeString, + Optional: true, + Description: `A predefined matcher for particular cases, other than SNI selection`, + ExactlyOneOf: []string{"hostname", "matcher"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `A serving state of this Certificate Map Entry.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateMapEntryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificates"); !isEmptyValue(reflect.ValueOf(certificatesProp)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { + obj["certificates"] = certificatesProp + } + hostnameProp, err := expandCertificateManagerCertificateMapEntryHostname(d.Get("hostname"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostname"); !isEmptyValue(reflect.ValueOf(hostnameProp)) && (ok || !reflect.DeepEqual(v, hostnameProp)) { + obj["hostname"] = hostnameProp + } + matcherProp, err := expandCertificateManagerCertificateMapEntryMatcher(d.Get("matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("matcher"); 
!isEmptyValue(reflect.ValueOf(matcherProp)) && (ok || !reflect.DeepEqual(v, matcherProp)) { + obj["matcher"] = matcherProp + } + nameProp, err := expandCertificateManagerCertificateMapEntryName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries?certificateMapEntryId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateMapEntry: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating CertificateMapEntry: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = certificateManagerOperationWaitTime( + config, res, project, "Creating CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateMapEntry: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateMapEntry %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateMapEntryRead(d, meta) +} + +func 
resourceCertificateManagerCertificateMapEntryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMapEntry %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + + if err := d.Set("description", flattenCertificateManagerCertificateMapEntryDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateMapEntryCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateMapEntryUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateMapEntryLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("certificates", 
flattenCertificateManagerCertificateMapEntryCertificates(res["certificates"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("state", flattenCertificateManagerCertificateMapEntryState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("hostname", flattenCertificateManagerCertificateMapEntryHostname(res["hostname"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("matcher", flattenCertificateManagerCertificateMapEntryMatcher(res["matcher"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("name", flattenCertificateManagerCertificateMapEntryName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateMapEntryUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { + obj["labels"] = labelsProp + } + certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { + obj["certificates"] = certificatesProp + } + hostnameProp, err := expandCertificateManagerCertificateMapEntryHostname(d.Get("hostname"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostname"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostnameProp)) { + obj["hostname"] = hostnameProp + } + matcherProp, err := expandCertificateManagerCertificateMapEntryMatcher(d.Get("matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("matcher"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, matcherProp)) { + obj["matcher"] = matcherProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CertificateMapEntry %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("certificates") { + updateMask = append(updateMask, "certificates") + } + + if d.HasChange("hostname") { + updateMask = append(updateMask, "hostname") + } + + if d.HasChange("matcher") { + updateMask = append(updateMask, "matcher") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value 
was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating CertificateMapEntry %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CertificateMapEntry %q: %#v", d.Id(), res) + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Updating CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateMapEntryRead(d, meta) +} + +func resourceCertificateManagerCertificateMapEntryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateMapEntry %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "CertificateMapEntry") + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting 
CertificateMapEntry %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateMapEntryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)/certificateMapEntries/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateMapEntryDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCertificateManagerCertificateMapEntryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + return NameFromSelfLinkStateFunc(v) +} + +func expandCertificateManagerCertificateMapEntryDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCertificateManagerCertificateMapEntryCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return GetResourceNameFromSelfLink(v.(string)), nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry_generated_test.go new file mode 100644 index 0000000000..bbeed5fa64 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry_generated_test.go @@ -0,0 +1,136 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** 
Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccCertificateManagerCertificateMapEntry_certificateManagerCertificateMapEntryFullExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCertificateManagerCertificateMapEntryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCertificateManagerCertificateMapEntry_certificateManagerCertificateMapEntryFullExample(context), + }, + { + ResourceName: "google_certificate_manager_certificate_map_entry.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"map"}, + }, + }, + }) +} + +func testAccCertificateManagerCertificateMapEntry_certificateManagerCertificateMapEntryFullExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_certificate_manager_certificate_map" "certificate_map" { + name = "tf-test-cert-map-entry%{random_suffix}" + description = "My acceptance test certificate map" + labels = { + "terraform" : true, + "acc-test" : true, + } +} + +resource "google_certificate_manager_certificate_map_entry" "default" { + name = "tf-test-cert-map-entry%{random_suffix}" + description = "My acceptance test certificate map entry" + map = 
google_certificate_manager_certificate_map.certificate_map.name + labels = { + "terraform" : true, + "acc-test" : true, + } + certificates = [google_certificate_manager_certificate.certificate.id] + matcher = "PRIMARY" +} + +resource "google_certificate_manager_certificate" "certificate" { + name = "tf-test-cert-map-entry%{random_suffix}" + description = "The default cert" + scope = "DEFAULT" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + google_certificate_manager_dns_authorization.instance2.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + google_certificate_manager_dns_authorization.instance2.id, + ] + } +} + + +resource "google_certificate_manager_dns_authorization" "instance" { + name = "tf-test-dns-auth%{random_suffix}" + description = "The default dnss" + domain = "subdomain%{random_suffix}.hashicorptest.com" +} + +resource "google_certificate_manager_dns_authorization" "instance2" { + name = "tf-test-dns-auth2%{random_suffix}" + description = "The default dnss" + domain = "subdomain2%{random_suffix}.hashicorptest.com" +} +`, context) +} + +func testAccCheckCertificateManagerCertificateMapEntryDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_certificate_manager_certificate_map_entry" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return 
fmt.Errorf("CertificateManagerCertificateMapEntry still exists at %s", url) + } + } + + return nil + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry_sweeper_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry_sweeper_test.go new file mode 100644 index 0000000000..4509c31680 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry_sweeper_test.go @@ -0,0 +1,124 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("CertificateManagerCertificateMapEntry", &resource.Sweeper{ + Name: "CertificateManagerCertificateMapEntry", + F: testSweepCertificateManagerCertificateMapEntry, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerCertificateMapEntry(region string) error { + resourceName := "CertificateManagerCertificateMapEntry" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["certificateMapEntries"] + if !ok { + 
log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_generated_test.go similarity index 57% rename from third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_generated_test.go rename to 
third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_generated_test.go index 43db7df9ad..87b51a2874 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_domain_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_generated_test.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccActiveDirectoryDomain_activeDirectoryDomainBasicExample(t *testing.T) { +func TestAccCertificateManagerCertificateMap_certificateManagerCertificateMapBasicExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -33,35 +33,38 @@ func TestAccActiveDirectoryDomain_activeDirectoryDomainBasicExample(t *testing.T vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckActiveDirectoryDomainDestroyProducer(t), + CheckDestroy: testAccCheckCertificateManagerCertificateMapDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccActiveDirectoryDomain_activeDirectoryDomainBasicExample(context), + Config: testAccCertificateManagerCertificateMap_certificateManagerCertificateMapBasicExample(context), }, { - ResourceName: "google_active_directory_domain.ad-domain", + ResourceName: "google_certificate_manager_certificate_map.default", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"domain_name"}, + ImportStateVerifyIgnore: []string{"name"}, }, }, }) } -func testAccActiveDirectoryDomain_activeDirectoryDomainBasicExample(context map[string]interface{}) string { +func testAccCertificateManagerCertificateMap_certificateManagerCertificateMapBasicExample(context map[string]interface{}) string { return Nprintf(` -resource "google_active_directory_domain" "ad-domain" { - domain_name = "tfgen%{random_suffix}.org.com" - 
locations = ["us-central1"] - reserved_ip_range = "192.168.255.0/24" +resource "google_certificate_manager_certificate_map" "default" { + name = "tf-test-cert-map%{random_suffix}" + description = "My acceptance test certificate map" + labels = { + "terraform" : true, + "acc-test" : true, + } } `, context) } -func testAccCheckActiveDirectoryDomainDestroyProducer(t *testing.T) func(s *terraform.State) error { +func testAccCheckCertificateManagerCertificateMapDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { - if rs.Type != "google_active_directory_domain" { + if rs.Type != "google_certificate_manager_certificate_map" { continue } if strings.HasPrefix(name, "data.") { @@ -70,7 +73,7 @@ func testAccCheckActiveDirectoryDomainDestroyProducer(t *testing.T) func(s *terr config := googleProviderConfig(t) - url, err := replaceVarsForTest(config, rs, "{{ActiveDirectoryBasePath}}{{name}}") + url, err := replaceVarsForTest(config, rs, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") if err != nil { return err } @@ -83,7 +86,7 @@ func testAccCheckActiveDirectoryDomainDestroyProducer(t *testing.T) func(s *terr _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) if err == nil { - return fmt.Errorf("ActiveDirectoryDomain still exists at %s", url) + return fmt.Errorf("CertificateManagerCertificateMap still exists at %s", url) } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_sweeper_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_sweeper_test.go new file mode 100644 index 0000000000..8bd98a1897 --- /dev/null +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_sweeper_test.go @@ -0,0 +1,124 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("CertificateManagerCertificateMap", &resource.Sweeper{ + Name: "CertificateManagerCertificateMap", + F: testSweepCertificateManagerCertificateMap, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerCertificateMap(region string) error { + resourceName := "CertificateManagerCertificateMap" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["certificateMaps"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps/{{name}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were 
non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go index dcfed30c3e..193d4a9767 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go @@ -66,7 +66,7 @@ and all following characters must be a dash, underscore, letter or digit.`, "labels": { Type: schema.TypeMap, Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, + Description: `Set of label tags associated with the DNS Authorization resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "dns_resource_record": { @@ -83,9 +83,10 @@ certificate.`, Description: `Data of the DNS Resource Record.`, }, "name": { - Type: schema.TypeString, - Computed: true, - Description: `Fully qualified name of the DNS Resource Record.`, + Type: schema.TypeString, + Computed: true, + Description: `Fully qualified name of the DNS Resource Record. +E.g. 
'_acme-challenge.example.com'.`, }, "type": { Type: schema.TypeString, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cgc_snippet_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cgc_snippet_generated_test.go index bb2cb66fbf..672c85c0af 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cgc_snippet_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cgc_snippet_generated_test.go @@ -58,7 +58,7 @@ resource "google_compute_instance" "default" { } boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -231,7 +231,7 @@ resource "google_compute_instance" "spot_vm_instance" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -239,6 +239,7 @@ resource "google_compute_instance" "spot_vm_instance" { preemptible = true automatic_restart = false provisioning_model = "SPOT" + instance_termination_action = "STOP" } network_interface { @@ -288,7 +289,7 @@ resource "google_compute_instance" "custom_hostname_instance" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } network_interface { @@ -302,6 +303,219 @@ resource "google_compute_instance" "custom_hostname_instance" { `, context) } +func TestAccCGCSnippet_computeReservationExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCGCSnippet_computeReservationExample(context), + }, + }, + }) +} + +func testAccCGCSnippet_computeReservationExample(context map[string]interface{}) string { + return 
Nprintf(` + +resource "google_compute_reservation" "gce_reservation_local" { + name = "tf-test-gce-reservation-local%{random_suffix}" + zone = "us-central1-c" + project = "%{project}" + + share_settings { + share_type = "LOCAL" + } + + specific_reservation { + count = 1 + instance_properties { + machine_type = "n2-standard-2" + } + } +} + +`, context) +} + +func TestAccCGCSnippet_instanceVirtualDisplayEnabledExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCGCSnippet_instanceVirtualDisplayEnabledExample(context), + }, + { + ResourceName: "google_compute_instance.instance_virtual_display", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCGCSnippet_instanceVirtualDisplayEnabledExample(context map[string]interface{}) string { + return Nprintf(` + +resource "google_compute_instance" "instance_virtual_display" { + name = "tf-test-instance-virtual-display%{random_suffix}" + machine_type = "f1-micro" + zone = "us-central1-c" + + # Set the below to true to enable virtual display + enable_display = true + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + network_interface { + # A default network is created for all GCP projects + network = "default" + access_config { + } + } +} + +`, context) +} + +func TestAccCGCSnippet_eventarcWorkflowsExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + Steps: []resource.TestStep{ + { + Config: testAccCGCSnippet_eventarcWorkflowsExample(context), + }, + { + ResourceName: "google_eventarc_trigger.trigger_pubsub_tf", + ImportState: true, + ImportStateVerify: true, + }, + }, 
+ }) +} + +func testAccCGCSnippet_eventarcWorkflowsExample(context map[string]interface{}) string { + return Nprintf(` +# Used to retrieve project_number later +data "google_project" "project" { + provider = google-beta +} + +# Enable Eventarc API +resource "google_project_service" "eventarc" { + provider = google-beta + service = "eventarc.googleapis.com" + disable_on_destroy = false +} + +# Enable Pub/Sub API +resource "google_project_service" "pubsub" { + provider = google-beta + service = "pubsub.googleapis.com" + disable_on_destroy = false +} + +# Enable Workflows API +resource "google_project_service" "workflows" { + provider = google-beta + service = "workflows.googleapis.com" + disable_on_destroy = false +} + + + +# Create a service account for Eventarc trigger and Workflows +resource "google_service_account" "eventarc_workflows_service_account" { + provider = google-beta + account_id = "eventarc-workflows-sa" + display_name = "Eventarc Workflows Service Account" +} + +# Grant the logWriter role to the service account +resource "google_project_iam_binding" "project_binding_eventarc" { + provider = google-beta + project = data.google_project.project.id + role = "roles/logging.logWriter" + +members = ["serviceAccount:${google_service_account.eventarc_workflows_service_account.email}"] + + depends_on = [google_service_account.eventarc_workflows_service_account] +} + +# Grant the workflows.invoker role to the service account +resource "google_project_iam_binding" "project_binding_workflows" { + provider = google-beta + project = data.google_project.project.id + role = "roles/workflows.invoker" + +members = ["serviceAccount:${google_service_account.eventarc_workflows_service_account.email}"] + + depends_on = [google_service_account.eventarc_workflows_service_account] +} + + +# Define and deploy a workflow +resource "google_workflows_workflow" "workflows_example" { + name = "tf-test-pubsub-workflow-tf%{random_suffix}" + provider = google-beta + region = 
"us-central1" + description = "A sample workflow" + service_account = google_service_account.eventarc_workflows_service_account.id + # Imported main workflow YAML file + source_contents = templatefile("test-fixtures/workflow.yaml",{}) + + depends_on = [google_project_service.workflows, +google_service_account.eventarc_workflows_service_account] +} + + +# Create an Eventarc trigger routing Pub/Sub events to Workflows +resource "google_eventarc_trigger" "trigger_pubsub_tf" { + name = "tf-test-trigger-pubsub-workflow-tf%{random_suffix}" + provider = google-beta + location = "us-central1" + matching_criteria { + attribute = "type" + value = "google.cloud.pubsub.topic.v1.messagePublished" + } + destination { + workflow = google_workflows_workflow.workflows_example.id + } + + + service_account = google_service_account.eventarc_workflows_service_account.id + + depends_on = [google_project_service.pubsub, google_project_service.eventarc, +google_service_account.eventarc_workflows_service_account] +} + +`, context) +} + func TestAccCGCSnippet_sqlDatabaseInstanceSqlserverExample(t *testing.T) { skipIfVcr(t) t.Parallel() @@ -1087,3 +1301,65 @@ resource "google_pubsub_topic" "topic" { } `, context) } + +func TestAccCGCSnippet_storageStaticWebsiteExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCGCSnippet_storageStaticWebsiteExample(context), + }, + { + ResourceName: "google_storage_bucket.static_website", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCGCSnippet_storageStaticWebsiteExample(context map[string]interface{}) string { + return Nprintf(` +# Create new storage bucket in the US multi-region +# with coldline storage and settings for main_page_suffix and not_found_page +resource "google_storage_bucket" 
"static_website" { + name = "tf-test-static-website-bucket%{random_suffix}" + location = "US" + storage_class = "COLDLINE" + website { + main_page_suffix = "index.html%{random_suffix}" + not_found_page = "index.html%{random_suffix}" + } +} + +# Make bucket public by granting allUsers READER access +resource "google_storage_bucket_access_control" "public_rule" { + bucket = google_storage_bucket.static_website.id + role = "READER" + entity = "allUsers" +} + +# Upload a simple index.html page to the bucket +resource "google_storage_bucket_object" "indexpage" { + name = "index.html%{random_suffix}" + content = "Hello World!" + content_type = "text/html" + bucket = google_storage_bucket.static_website.id +} + +# Upload a simple 404 / error page to the bucket +resource "google_storage_bucket_object" "errorpage" { + name = "404.html%{random_suffix}" + content = "404!" + content_type = "text/html" + bucket = google_storage_bucket.static_website.id +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_run_service_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_run_service_generated_test.go index dfd6c5c312..e53e109329 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_run_service_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_run_service_generated_test.go @@ -134,6 +134,125 @@ resource "google_sql_database_instance" "instance" { `, context) } +func TestAccCloudRunService_cloudRunServiceConfigurationExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + 
Config: testAccCloudRunService_cloudRunServiceConfigurationExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceConfigurationExample(context map[string]interface{}) string { + return Nprintf(` +# Example configuration of a Cloud Run service + +resource "google_cloud_run_service" "default" { + name = "config%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + + # Container "entry-point" command + # https://cloud.google.com/run/docs/configuring/containers#configure-entrypoint + command = ["/server"] + + # Container "entry-point" args + # https://cloud.google.com/run/docs/configuring/containers#configure-entrypoint + args = [] + + # Enable HTTP/2 + # https://cloud.google.com/run/docs/configuring/http2 + ports { + name = "h2c" + container_port = 8080 + } + + # Environment variables + # https://cloud.google.com/run/docs/configuring/environment-variables + env { + name = "foo" + value = "bar" + } + env { + name = "baz" + value = "quux" + } + + resources { + limits = { + # CPU usage limit + # https://cloud.google.com/run/docs/configuring/cpu + cpu = "1000m" # 1 vCPU + + # Memory usage limit (per container) + # https://cloud.google.com/run/docs/configuring/memory-limits + memory = "512Mi" + } + } + } + + # Timeout + # https://cloud.google.com/run/docs/configuring/request-timeout + timeout_seconds = 300 + + # Maximum concurrent requests + # https://cloud.google.com/run/docs/configuring/concurrency + container_concurrency = 80 + } + + metadata { + annotations = { + + # Max instances + # https://cloud.google.com/run/docs/configuring/max-instances + "autoscaling.knative.dev/maxScale" = 10 + + # Min instances + # https://cloud.google.com/run/docs/configuring/min-instances + 
"autoscaling.knative.dev/minScale" = 1 + + # If true, garbage-collect CPU when once a request finishes + # https://cloud.google.com/run/docs/configuring/cpu-allocation + "run.googleapis.com/cpu-throttling" = false + } + + # Labels + # https://cloud.google.com/run/docs/configuring/labels + labels = { + foo : "bar" + baz : "quux" + } + } + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + func TestAccCloudRunService_cloudRunServiceNoauthExample(t *testing.T) { t.Parallel() @@ -162,6 +281,8 @@ func TestAccCloudRunService_cloudRunServiceNoauthExample(t *testing.T) { func testAccCloudRunService_cloudRunServiceNoauthExample(context map[string]interface{}) string { return Nprintf(` +# Example of how to deploy a publicly-accessible Cloud Run application + resource "google_cloud_run_service" "default" { name = "tf-test-cloudrun-srv%{random_suffix}" location = "us-central1" @@ -263,6 +384,249 @@ resource "google_cloud_run_service" "default" { `, context) } +func TestAccCloudRunService_cloudRunServiceStaticOutboundExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceStaticOutboundExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceStaticOutboundExample(context map[string]interface{}) string { + return Nprintf(` +# Example of setting up a Cloud Run service with a static outbound IP + +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-cr-static-ip-network%{random_suffix}" +} + +resource 
"google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-cr-static-ip%{random_suffix}" + ip_cidr_range = "10.124.0.0/28" + network = google_compute_network.default.id + region = "us-central1" +} + +resource "google_project_service" "vpc" { + provider = google-beta + service = "vpcaccess.googleapis.com" + disable_on_destroy = false +} + +resource "google_vpc_access_connector" "default" { + provider = google-beta + name = "tf-test-cr-conn%{random_suffix}" + + subnet { + name = google_compute_subnetwork.default.name + } + + # Wait for VPC API enablement + # before creating this resource + depends_on = [ + google_project_service.vpc + ] +} + +resource "google_compute_router" "default" { + provider = google-beta + name = "tf-test-cr-static-ip-router%{random_suffix}" + network = google_compute_network.default.name + region = google_compute_subnetwork.default.region +} + +resource "google_compute_address" "default" { + provider = google-beta + name = "tf-test-cr-static-ip-addr%{random_suffix}" + region = google_compute_subnetwork.default.region +} + +resource "google_compute_router_nat" "default" { + provider = google-beta + name = "tf-test-cr-static-nat%{random_suffix}" + router = google_compute_router.default.name + region = google_compute_subnetwork.default.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.default.self_link] + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.default.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } +} + +resource "google_cloud_run_service" "default" { + provider = google-beta + name = "tf-test-cr-static-ip-service%{random_suffix}" + location = google_compute_subnetwork.default.region + + template { + spec { + containers { + # Replace with the URL of your container + # gcr.io// + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + annotations = { + 
"run.googleapis.com/vpc-access-connector" = google_vpc_access_connector.default.name + "run.googleapis.com/vpc-access-egress" = "all-traffic" + "run.googleapis.com/ingress" = "all" + } + } +} +`, context) +} + +func TestAccCloudRunService_cloudRunServiceScheduledExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceScheduledExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceScheduledExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_project_service" "run_api" { + project = "%{project}" + service = "run.googleapis.com" + disable_dependent_services = true + disable_on_destroy = false +} + +resource "google_project_service" "iam_api" { + project = "%{project}" + service = "iam.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "resource_manager_api" { + project = "%{project}" + service = "cloudresourcemanager.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "scheduler_api" { + project = "%{project}" + service = "cloudscheduler.googleapis.com" + disable_on_destroy = false +} + +resource "google_cloud_run_service" "default" { + project = "%{project}" + name = "tf-test-my-scheduled-service%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + traffic { + percent = 100 + latest_revision = true + } + + # Use an explicit 
depends_on clause to wait until API is enabled + depends_on = [ + google_project_service.run_api + ] +} + +resource "google_service_account" "default" { + project = "%{project}" + account_id = "tf-test-scheduler-sa%{random_suffix}" + description = "Cloud Scheduler service account; used to trigger scheduled Cloud Run jobs." + display_name = "scheduler-sa" + + # Use an explicit depends_on clause to wait until API is enabled + depends_on = [ + google_project_service.iam_api + ] +} + +resource "google_cloud_scheduler_job" "default" { + name = "tf-test-scheduled-cloud-run-job%{random_suffix}" + description = "Invoke a Cloud Run container on a schedule." + schedule = "*/8 * * * *" + time_zone = "America/New_York" + attempt_deadline = "320s" + + retry_config { + retry_count = 1 + } + + http_target { + http_method = "POST" + uri = google_cloud_run_service.default.status[0].url + + oidc_token { + service_account_email = google_service_account.default.email + } + } + + # Use an explicit depends_on clause to wait until API is enabled + depends_on = [ + google_project_service.scheduler_api + ] +} + +resource "google_cloud_run_service_iam_member" "default" { + project = "%{project}" + location = google_cloud_run_service.default.location + service = google_cloud_run_service.default.name + role = "roles/run.invoker" + member = "serviceAccount:${google_service_account.default.email}" +} +`, context) +} + func TestAccCloudRunService_cloudRunServiceSecretEnvironmentVariablesExample(t *testing.T) { t.Parallel() @@ -458,7 +822,7 @@ resource "google_cloud_run_service" "default" { `, context) } -func TestAccCloudRunService_eventarcBasicTfExample(t *testing.T) { +func TestAccCloudRunService_cloudRunServiceIngressExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -471,7 +835,7 @@ func TestAccCloudRunService_eventarcBasicTfExample(t *testing.T) { CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), Steps: []resource.TestStep{ { - Config: 
testAccCloudRunService_eventarcBasicTfExample(context), + Config: testAccCloudRunService_cloudRunServiceIngressExample(context), }, { ResourceName: "google_cloud_run_service.default", @@ -483,38 +847,204 @@ func TestAccCloudRunService_eventarcBasicTfExample(t *testing.T) { }) } -func testAccCloudRunService_eventarcBasicTfExample(context map[string]interface{}) string { +func testAccCloudRunService_cloudRunServiceIngressExample(context map[string]interface{}) string { return Nprintf(` -# Used to retrieve project_number later -data "google_project" "project" { - provider = google-beta -} - -# Enable Cloud Run API -resource "google_project_service" "run" { +resource "google_cloud_run_service" "default" { provider = google-beta - service = "run.googleapis.com" - disable_on_destroy = false + name = "tf-test-ingress-service%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" #public image for your service + } + } + } + traffic { + percent = 100 + latest_revision = true + } + metadata { + annotations = { + # For valid annotation values and descriptions, see + # https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress + "run.googleapis.com/ingress" = "internal" + } + } } - -# Enable Eventarc API -resource "google_project_service" "eventarc" { - provider = google-beta - service = "eventarc.googleapis.com" - disable_on_destroy = false +`, context) } +func TestAccCloudRunService_cloudRunServiceInterserviceExample(t *testing.T) { + t.Parallel() - -# Deploy Cloud Run service -resource "google_cloud_run_service" "default" { - provider = google-beta - name = "tf-test-cloudrun-hello-tf%{random_suffix}" - location = "us-east1" + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } - template { - spec { - containers { + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceInterserviceExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceInterserviceExample(context map[string]interface{}) string { + return Nprintf(` +# Example of using a public Cloud Run service to call a private one + +resource "google_cloud_run_service" "default" { + name = "tf-test-public-service%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + # TODO: replace this with a public service container + # (This service can be invoked by anyone on the internet) + image = "us-docker.pkg.dev/cloudrun/container/hello" + + # Include a reference to the private Cloud Run + # service's URL as an environment variable. + env { + name = "URL" + value = google_cloud_run_service.default_private.status[0].url + } + } + + # Give the "public" Cloud Run service + # a service account's identity + service_account_name = google_service_account.default.email + } + } +} + +data "google_iam_policy" "public" { + binding { + role = "roles/run.invoker" + members = [ + "allUsers", + ] + } +} + +resource "google_cloud_run_service_iam_policy" "public" { + location = google_cloud_run_service.default.location + project = google_cloud_run_service.default.project + service = google_cloud_run_service.default.name + + policy_data = data.google_iam_policy.public.policy_data +} + +resource "google_service_account" "default" { + account_id = "cloud-run-interservice-id" + description = "Identity used by a public Cloud Run service to call private Cloud Run services." 
+ display_name = "cloud-run-interservice-id" +} + +resource "google_cloud_run_service" "default_private" { + name = "tf-test-private-service%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + // TODO: replace this with a private service container + // (This service should only be invocable by the public service) + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } +} + +data "google_iam_policy" "private" { + binding { + role = "roles/run.invoker" + members = [ + "serviceAccount:${google_service_account.default.email}", + ] + } +} + +resource "google_cloud_run_service_iam_policy" "private" { + location = google_cloud_run_service.default_private.location + project = google_cloud_run_service.default_private.project + service = google_cloud_run_service.default_private.name + + policy_data = data.google_iam_policy.private.policy_data +} +`, context) +} + +func TestAccCloudRunService_eventarcBasicTfExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_eventarcBasicTfExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_eventarcBasicTfExample(context map[string]interface{}) string { + return Nprintf(` +# Used to retrieve project_number later +data "google_project" "project" { + provider = google-beta +} + +# Enable Cloud Run API +resource "google_project_service" "run" { + provider = google-beta + service = "run.googleapis.com" + disable_on_destroy = false +} + +# Enable Eventarc API +resource "google_project_service" "eventarc" { + provider = 
google-beta + service = "eventarc.googleapis.com" + disable_on_destroy = false +} + + + +# Deploy Cloud Run service +resource "google_cloud_run_service" "default" { + provider = google-beta + name = "tf-test-cloudrun-hello-tf%{random_suffix}" + location = "us-east1" + + template { + spec { + containers { image = "gcr.io/cloudrun/hello" } } @@ -603,6 +1133,593 @@ resource "google_eventarc_trigger" "tf-test-trigger-auditlog-tf%{random_suffix}" `, context) } +func TestAccCloudRunService_cloudRunServiceMultipleRegionsExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceMultipleRegionsExample(context), + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceMultipleRegionsExample(context map[string]interface{}) string { + return Nprintf(` +# Cloud Run service replicated across multiple GCP regions + +resource "google_project_service" "compute_api" { + provider = google-beta + project = "%{project}" + service = "compute.googleapis.com" + disable_dependent_services = true + disable_on_destroy = false +} + +resource "google_project_service" "run_api" { + provider = google-beta + project = "%{project}" + service = "run.googleapis.com" + disable_dependent_services = true + disable_on_destroy = false +} + +variable "domain_name" { + type = string + default = "example.com" +} + +variable "run_regions" { + type = list(string) + default = ["us-central1", "europe-west1"] +} + +resource "google_compute_global_address" "lb_default" { + provider = google-beta + name = "tf-test-myservice-service-ip%{random_suffix}" + project = "%{project}" + + # Use an explicit depends_on clause to wait until API is enabled + depends_on 
= [ + google_project_service.compute_api + ] +} + +resource "google_compute_backend_service" "lb_default" { + provider = google-beta + name = "tf-test-myservice-backend%{random_suffix}" + project = "%{project}" + load_balancing_scheme = "EXTERNAL_MANAGED" + + backend { + balancing_mode = "UTILIZATION" + capacity_scaler = 0.85 + group = google_compute_region_network_endpoint_group.lb_default[0].id + } + + backend { + balancing_mode = "UTILIZATION" + capacity_scaler = 0.85 + group = google_compute_region_network_endpoint_group.lb_default[1].id + } + + # Use an explicit depends_on clause to wait until API is enabled + depends_on = [ + google_project_service.compute_api, + ] +} + +resource "google_compute_url_map" "lb_default" { + provider = google-beta + name = "tf-test-myservice-lb-urlmap%{random_suffix}" + project = "%{project}" + default_service = google_compute_backend_service.lb_default.id + + path_matcher { + name = "allpaths" + default_service = google_compute_backend_service.lb_default.id + route_rules { + priority = 1 + url_redirect { + https_redirect = true + redirect_response_code = "MOVED_PERMANENTLY_DEFAULT" + } + } + } +} + +resource "google_compute_managed_ssl_certificate" "lb_default" { + provider = google-beta + name = "tf-test-myservice-ssl-cert%{random_suffix}" + project = "%{project}" + + managed { + domains = [var.domain_name] + } +} + +resource "google_compute_target_https_proxy" "lb_default" { + provider = google-beta + name = "tf-test-myservice-https-proxy%{random_suffix}" + project = "%{project}" + url_map = google_compute_url_map.lb_default.id + ssl_certificates = [ + google_compute_managed_ssl_certificate.lb_default.name + ] + depends_on = [ + google_compute_managed_ssl_certificate.lb_default + ] +} + +resource "google_compute_global_forwarding_rule" "lb_default" { + provider = google-beta + name = "tf-test-myservice-lb-fr%{random_suffix}" + project = "%{project}" + load_balancing_scheme = "EXTERNAL_MANAGED" + target = 
google_compute_target_https_proxy.lb_default.id + ip_address = google_compute_global_address.lb_default.id + port_range = "443" + depends_on = [google_compute_target_https_proxy.lb_default] +} + +resource "google_compute_region_network_endpoint_group" "lb_default" { + provider = google-beta + count = length(var.run_regions) + project = "%{project}" + name = "tf-test-myservice-neg%{random_suffix}" + network_endpoint_type = "SERVERLESS" + region = var.run_regions[count.index] + cloud_run { + service = google_cloud_run_service.run_default[count.index].name + } +} + +output "load_balancer_ip_addr" { + value = google_compute_global_address.lb_default.address +} + +resource "google_cloud_run_service" "run_default" { + provider = google-beta + count = length(var.run_regions) + project = "%{project}" + name = "tf-test-myservice-run-app%{random_suffix}-${var.run_regions[count.index]}" + location = var.run_regions[count.index] + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + traffic { + percent = 100 + latest_revision = true + } + + # Use an explicit depends_on clause to wait until API is enabled + depends_on = [ + google_project_service.run_api + ] +} + +resource "google_cloud_run_service_iam_member" "run_allow_unauthenticated" { + provider = google-beta + count = length(var.run_regions) + project = "%{project}" + location = google_cloud_run_service.run_default[count.index].location + service = google_cloud_run_service.run_default[count.index].name + role = "roles/run.invoker" + member = "allUsers" +} + +resource "google_compute_url_map" "https_default" { + provider = google-beta + name = "tf-test-myservice-https-urlmap%{random_suffix}" + project = "%{project}" + + default_url_redirect { + redirect_response_code = "MOVED_PERMANENTLY_DEFAULT" + https_redirect = true + strip_query = false + } +} + +resource "google_compute_target_http_proxy" "https_default" { + provider = google-beta + name = 
"tf-test-myservice-http-proxy%{random_suffix}" + project = "%{project}" + url_map = google_compute_url_map.https_default.id + + depends_on = [ + google_compute_url_map.https_default + ] +} + +resource "google_compute_global_forwarding_rule" "https_default" { + provider = google-beta + name = "tf-test-myservice-https-fr%{random_suffix}" + project = "%{project}" + target = google_compute_target_http_proxy.https_default.id + ip_address = google_compute_global_address.lb_default.id + port_range = "80" + depends_on = [google_compute_target_http_proxy.https_default] +} +`, context) +} + +func TestAccCloudRunService_cloudrunServiceAccessControlExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudrunServiceAccessControlExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudrunServiceAccessControlExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_service" "default" { + name = "tf-test-cloud-run-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + } + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +resource "google_cloud_run_service_iam_binding" "default" { + location = google_cloud_run_service.default.location + service = google_cloud_run_service.default.name + role = "roles/run.invoker" + members = [ + "allUsers" + ] +} +`, context) +} + +func TestAccCloudRunService_cloudRunSystemPackagesExample(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunSystemPackagesExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunSystemPackagesExample(context map[string]interface{}) string { + return Nprintf(` +# Example of how to deploy a Cloud Run application with system packages + +resource "google_cloud_run_service" "default" { + name = "tf-test-graphviz-example%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + # Replace with the URL of your graphviz image + # gcr.io//graphviz + image = "gcr.io/cloudrun/hello" + } + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +# Make Cloud Run service publicly accessible +resource "google_cloud_run_service_iam_member" "allow_unauthenticated" { + service = google_cloud_run_service.default.name + location = google_cloud_run_service.default.location + role = "roles/run.invoker" + member = "allUsers" +} +`, context) +} + +func TestAccCloudRunService_cloudRunServiceSecureServicesExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceSecureServicesExample(context), + }, + { + ResourceName: "google_cloud_run_service.renderer", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", 
"location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceSecureServicesExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_service" "renderer" { + provider = google-beta + name = "renderer%{random_suffix}" + location = "us-central1" + template { + spec { + containers { + # Replace with the URL of your Secure Services > Renderer image. + # gcr.io//renderer + image = "gcr.io/cloudrun/hello" + } + service_account_name = google_service_account.renderer.email + } + } + traffic { + percent = 100 + latest_revision = true + } +} + +resource "google_cloud_run_service" "editor" { + provider = google-beta + name = "editor%{random_suffix}" + location = "us-central1" + template { + spec { + containers { + # Replace with the URL of your Secure Services > Editor image. + # gcr.io//editor + image = "gcr.io/cloudrun/hello" + env { + name = "EDITOR_UPSTREAM_RENDER_URL" + value = google_cloud_run_service.renderer.status[0].url + } + } + service_account_name = google_service_account.editor.email + } + } + traffic { + percent = 100 + latest_revision = true + } +} + +resource "google_service_account" "renderer" { + provider = google-beta + account_id = "renderer-identity" + display_name = "Service identity of the Renderer (Backend) service." +} + +resource "google_service_account" "editor" { + provider = google-beta + account_id = "editor-identity" + display_name = "Service identity of the Editor (Frontend) service." 
+} + +resource "google_cloud_run_service_iam_member" "editor_invokes_renderer" { + provider = google-beta + location = google_cloud_run_service.renderer.location + service = google_cloud_run_service.renderer.name + role = "roles/run.invoker" + member = "serviceAccount:${google_service_account.editor.email}" +} + +data "google_iam_policy" "noauth" { + provider = google-beta + binding { + role = "roles/run.invoker" + members = [ + "allUsers", + ] + } +} + +resource "google_cloud_run_service_iam_policy" "noauth" { + provider = google-beta + location = google_cloud_run_service.editor.location + project = google_cloud_run_service.editor.project + service = google_cloud_run_service.editor.name + + policy_data = data.google_iam_policy.noauth.policy_data +} + +output "backend_url" { + value = google_cloud_run_service.renderer.status[0].url +} + +output "frontend_url" { + value = google_cloud_run_service.editor.status[0].url +} +`, context) +} + +func TestAccCloudRunService_cloudRunServiceTasksExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceTasksExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceTasksExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_service" "default" { + name = "tf-test-cloud-run-service-name%{random_suffix}" + location = "us-central1" + provider = google-beta + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + } + } + } + traffic { + 
percent = 100 + latest_revision = true + } +} + +resource "google_service_account" "sa" { + account_id = "cloud-run-task-invoker" + display_name = "Cloud Run Task Invoker" + provider = google-beta +} + +resource "google_cloud_run_service_iam_binding" "binding" { + location = google_cloud_run_service.default.location + service = google_cloud_run_service.default.name + role = "roles/run.invoker" + members = ["serviceAccount:${google_service_account.sa.email}"] + provider = google-beta + project = google_cloud_run_service.default.project +} + +resource "google_project_iam_binding" "project_binding" { + role = "roles/iam.serviceAccountTokenCreator" + members = ["serviceAccount:${google_service_account.sa.email}"] + provider = google-beta + project = google_cloud_run_service.default.project +} + +resource "google_cloud_tasks_queue" "default" { + name = "tf-test-cloud-tasks-queue-name%{random_suffix}" + location = "us-central1" + provider = google-beta +} +`, context) +} + +func TestAccCloudRunService_cloudrunServiceIdentityExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudrunServiceIdentityExample(context), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudrunServiceIdentityExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_service_account" "cloudrun_service_identity" { + account_id = "my-service-account" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloud-run-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + 
containers { + image = "gcr.io/cloudrun/hello" + } + service_account_name = google_service_account.cloudrun_service_identity.email + } + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + func testAccCheckCloudRunServiceDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go index 98df42b392..caff55cfaf 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go @@ -227,9 +227,10 @@ send a request to the targeted url`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "uri": { - Type: schema.TypeString, - Required: true, - Description: `The full URI path that the request will be sent to.`, + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: lastSlashDiffSuppress, + Description: `The full URI path that the request will be sent to.`, }, "body": { Type: schema.TypeString, @@ -306,6 +307,12 @@ the URI specified in target will be used.`, }, ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, }, + "paused": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Sets the job to a paused state. 
Jobs default to being enabled when this property is not set.`, + }, "pubsub_target": { Type: schema.TypeList, Optional: true, @@ -418,6 +425,11 @@ Values greater than 5 and negative values are not allowed.`, The value of this field must be a time zone name from the tz database.`, Default: "Etc/UTC", }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the job.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -461,6 +473,12 @@ func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { obj["timeZone"] = timeZoneProp } + pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("paused"); !isEmptyValue(reflect.ValueOf(pausedProp)) && (ok || !reflect.DeepEqual(v, pausedProp)) { + obj["paused"] = pausedProp + } attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) if err != nil { return err @@ -492,6 +510,11 @@ func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) e obj["httpTarget"] = httpTargetProp } + obj, err = resourceCloudSchedulerJobEncoder(d, meta, obj) + if err != nil { + return err + } + url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs") if err != nil { return err @@ -523,6 +546,28 @@ func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) e } d.SetId(id) + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk && paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + + linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", 
endpoint) + url, err = replaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, emptyReqBody, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) + log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) return resourceCloudSchedulerJobRead(d, meta) @@ -582,6 +627,12 @@ func resourceCloudSchedulerJobRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("time_zone", flattenCloudSchedulerJobTimeZone(res["timeZone"], d, config)); err != nil { return fmt.Errorf("Error reading Job: %s", err) } + if err := d.Set("state", flattenCloudSchedulerJobState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("paused", flattenCloudSchedulerJobPaused(res["paused"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } if err := d.Set("attempt_deadline", flattenCloudSchedulerJobAttemptDeadline(res["attemptDeadline"], d, config)); err != nil { return fmt.Errorf("Error reading Job: %s", err) } @@ -635,6 +686,12 @@ func resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { obj["timeZone"] = timeZoneProp } + pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("paused"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pausedProp)) { + obj["paused"] = pausedProp + } attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) if err != nil { return err @@ -666,6 +723,11 @@ func 
resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) e obj["httpTarget"] = httpTargetProp } + obj, err = resourceCloudSchedulerJobUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") if err != nil { return err @@ -686,6 +748,31 @@ func resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Finished updating Job %q: %#v", d.Id(), res) } + if d.HasChange("paused") { + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk { + if paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + } + + linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) + url, err = replaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, emptyReqBody, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) + } return resourceCloudSchedulerJobRead(d, meta) } @@ -766,6 +853,21 @@ func flattenCloudSchedulerJobTimeZone(v interface{}, d *schema.ResourceData, con return v } +func flattenCloudSchedulerJobState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudSchedulerJobPaused(v interface{}, d *schema.ResourceData, config *Config) interface{} { + state := d.Get("state") + if state == "PAUSED" { + return true + } + if state == "ENABLED" { + return false + } + return false // Job has an error state that's not paused or enabled +} + func 
flattenCloudSchedulerJobAttemptDeadline(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1065,6 +1167,10 @@ func expandCloudSchedulerJobTimeZone(v interface{}, d TerraformResourceData, con return v, nil } +func expandCloudSchedulerJobPaused(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandCloudSchedulerJobAttemptDeadline(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -1447,3 +1553,13 @@ func expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{} func expandCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } + +func resourceCloudSchedulerJobEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "paused") // Field doesn't exist in API + return obj, nil +} + +func resourceCloudSchedulerJobUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "paused") // Field doesn't exist in API + return obj, nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_generated_test.go index 951d27ed33..e3e09a9202 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_generated_test.go @@ -115,6 +115,54 @@ resource "google_cloud_scheduler_job" "job" { `, context) } +func TestAccCloudSchedulerJob_schedulerJobPausedExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": 
randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudSchedulerJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudSchedulerJob_schedulerJobPausedExample(context), + }, + { + ResourceName: "google_cloud_scheduler_job.job", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + +func testAccCloudSchedulerJob_schedulerJobPausedExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_scheduler_job" "job" { + paused = true + name = "tf-test-test-job%{random_suffix}" + description = "test http job with updated fields" + schedule = "*/8 * * * *" + time_zone = "America/New_York" + attempt_deadline = "320s" + + retry_config { + retry_count = 1 + } + + http_target { + http_method = "POST" + uri = "https://example.com/ping" + body = base64encode("{\"foo\":\"bar\"}") + } +} +`, context) +} + func TestAccCloudSchedulerJob_schedulerJobAppEngineExample(t *testing.T) { t.Parallel() diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_test.go index 4445094c72..d226fb6255 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -93,3 +94,79 @@ func TestCloudScheduler_FlattenHttpHeaders(t *testing.T) { } } } + +func TestAccCloudSchedulerJob_schedulerPausedExample(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudSchedulerJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudSchedulerJob_schedulerPaused(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "paused", "true"), + resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "state", "PAUSED"), + ), + }, + { + Config: testAccCloudSchedulerJob_schedulerUnPaused(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "paused", "false"), + resource.TestCheckResourceAttr("google_cloud_scheduler_job.job", "state", "ENABLED"), + ), + }, + }, + }) +} + +func testAccCloudSchedulerJob_schedulerPaused(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_scheduler_job" "job" { + paused = true + name = "tf-test-test-job%{random_suffix}" + description = "test http job with updated fields" + schedule = "*/8 * * * *" + time_zone = "America/New_York" + attempt_deadline = "320s" + + retry_config { + retry_count = 1 + } + + http_target { + http_method = "POST" + uri = "https://example.com/ping" + body = base64encode("{\"foo\":\"bar\"}") + } +} +`, context) +} + +func testAccCloudSchedulerJob_schedulerUnPaused(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_scheduler_job" "job" { + paused = false # Has been flipped + name = "tf-test-test-job%{random_suffix}" + description = "test http job with updated fields" + schedule = "*/8 * * * *" + time_zone = "America/New_York" + attempt_deadline = "320s" + + retry_config { + retry_count = 1 + } + + http_target { + http_method = "POST" + uri = "https://example.com/ping" + body = base64encode("{\"foo\":\"bar\"}") + } +} +`, context) +} diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go index a3bb21b66e..83a48f9cf7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go @@ -188,12 +188,12 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -210,7 +210,7 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyWorkerPool(context.Background(), obj, createDirective...) + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go index 5e0a1bead0..817e6cbf5a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go @@ -249,12 +249,12 @@ func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta inte Suspended: dcl.Bool(d.Get("suspended").(bool)), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -271,7 +271,7 @@ func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta inte } else { client.Config.BasePath = bp } - res, err := client.ApplyDeliveryPipeline(context.Background(), obj, createDirective...) + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go index 73b1fa80b2..7128595f2f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go @@ -83,6 +83,7 @@ func resourceClouddeployTarget() *schema.Resource { "execution_configs": { Type: schema.TypeList, + Computed: true, Optional: true, Description: "Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. When no configurations are specified, execution will use the default specified in `DefaultPool`.", Elem: ClouddeployTargetExecutionConfigsSchema(), @@ -177,12 +178,14 @@ func ClouddeployTargetExecutionConfigsSchema() *schema.Resource { "artifact_storage": { Type: schema.TypeString, + Computed: true, Optional: true, Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", }, "service_account": { Type: schema.TypeString, + Computed: true, Optional: true, Description: "Optional. Google service account to use for execution. 
If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used.", }, @@ -236,12 +239,12 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/targets/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -258,7 +261,7 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e } else { client.Config.BasePath = bp } - res, err := client.ApplyTarget(context.Background(), obj, createDirective...) + res, err := client.ApplyTarget(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -508,12 +511,12 @@ func flattenClouddeployTargetAnthosCluster(obj *clouddeploy.TargetAnthosCluster) } func expandClouddeployTargetExecutionConfigsArray(o interface{}) []clouddeploy.TargetExecutionConfigs { if o == nil { - return make([]clouddeploy.TargetExecutionConfigs, 0) + return nil } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]clouddeploy.TargetExecutionConfigs, 0) + return nil } items := make([]clouddeploy.TargetExecutionConfigs, 0, len(objs)) @@ -527,14 +530,14 @@ func expandClouddeployTargetExecutionConfigsArray(o interface{}) []clouddeploy.T func expandClouddeployTargetExecutionConfigs(o interface{}) *clouddeploy.TargetExecutionConfigs { if o == nil { - return clouddeploy.EmptyTargetExecutionConfigs + return nil } obj := o.(map[string]interface{}) return &clouddeploy.TargetExecutionConfigs{ Usages: expandClouddeployTargetExecutionConfigsUsagesArray(obj["usages"]), - ArtifactStorage: 
dcl.String(obj["artifact_storage"].(string)), - ServiceAccount: dcl.String(obj["service_account"].(string)), + ArtifactStorage: dcl.StringOrNil(obj["artifact_storage"].(string)), + ServiceAccount: dcl.StringOrNil(obj["service_account"].(string)), WorkerPool: dcl.String(obj["worker_pool"].(string)), } } @@ -605,7 +608,6 @@ func flattenClouddeployTargetExecutionConfigsUsagesArray(obj []clouddeploy.Targe } return items } - func expandClouddeployTargetExecutionConfigsUsagesArray(o interface{}) []clouddeploy.TargetExecutionConfigsUsagesEnum { objs := o.([]interface{}) items := make([]clouddeploy.TargetExecutionConfigsUsagesEnum, 0, len(objs)) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunction2_function_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunction2_function_test.go index 5acd802d20..2a960a9833 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunction2_function_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunction2_function_test.go @@ -182,3 +182,147 @@ resource "google_cloudfunctions2_function" "terraform-test2" { } `, context) } + +func TestAccCloudFunctions2Function_fullUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "zip_path": "./test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip", + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Re-use config from the generated tests + Config: testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample(context), + }, + { + Config: 
testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample_update(context), + }, + }, + }) +} + +func testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample_update(context map[string]interface{}) string { + return Nprintf(` +# [START functions_v2_basic_auditlogs] +# This example follows the examples shown in this Google Cloud Community blog post +# https://medium.com/google-cloud/applying-a-path-pattern-when-filtering-in-eventarc-f06b937b4c34 +# and the docs: +# https://cloud.google.com/eventarc/docs/path-patterns + +resource "google_storage_bucket" "source-bucket" { + provider = google-beta + name = "tf-test-gcf-source-bucket%{random_suffix}" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.source-bucket.name + source = "%{zip_path}" # Add path to the zipped function source code +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "tf-test-gcf-sa%{random_suffix}" + display_name = "Test Service Account - used for both the cloud function and eventarc trigger in the test" +} + +# Note: The right way of listening for Cloud Storage events is to use a Cloud Storage trigger. 
+# Here we use Audit Logs to monitor the bucket so path patterns can be used in the example of +# google_cloudfunctions2_function below (Audit Log events have path pattern support) +resource "google_storage_bucket" "audit-log-bucket" { + provider = google-beta + name = "tf-test-gcf-auditlog-bucket%{random_suffix}" + location = "us-central1" # The trigger must be in the same location as the bucket + uniform_bucket_level_access = true +} + +# Permissions on the service account used by the function and Eventarc trigger +resource "google_project_iam_member" "invoking" { + provider = google-beta + project = "%{project}" + role = "roles/run.invoker" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_project_iam_member" "event-receiving" { + provider = google-beta + project = "%{project}" + role = "roles/eventarc.eventReceiver" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_project_iam_member" "artifactregistry-reader" { + provider = google-beta + project = "%{project}" + role = "roles/artifactregistry.reader" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_cloudfunctions2_function" "function" { + provider = google-beta + depends_on = [ + google_project_iam_member.event-receiving, + google_project_iam_member.artifactregistry-reader, + ] + name = "tf-test-gcf-function%{random_suffix}" + location = "us-central1" + description = "a new function" + + build_config { + runtime = "nodejs12" + entry_point = "entryPoint" # Set the entry point in the code + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.source-bucket.name + object = google_storage_bucket_object.object.name + } + } + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "256M" + timeout_seconds = 60 + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + 
ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" # The trigger must be in the same location as the bucket + event_type = "google.cloud.audit.log.v1.written" + retry_policy = "RETRY_POLICY_RETRY" + service_account_email = google_service_account.account.email + event_filters { + attribute = "serviceName" + value = "storage.googleapis.com" + } + event_filters { + attribute = "methodName" + value = "storage.objects.get" # Update: change value + } + event_filters { + attribute = "resourceName" + value = google_storage_bucket.audit-log-bucket.name # Update: stops using path pattern operator + } + } +} +# [END functions_v2_basic_auditlogs]`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go index 2aed46cd9c..e632e631b1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go @@ -200,6 +200,13 @@ response to a condition in another service.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "event_filters": { + Type: schema.TypeSet, + Optional: true, + Description: `Criteria used to filter events.`, + Elem: cloudfunctions2functionEventTriggerEventFiltersSchema(), + // Default schema.HashSchema is used. 
+ }, "event_type": { Type: schema.TypeString, Optional: true, @@ -207,6 +214,7 @@ response to a condition in another service.`, }, "pubsub_topic": { Type: schema.TypeString, + Computed: true, Optional: true, Description: `The name of a Pub/Sub topic in the same project that will be used as the transport topic for the event delivery.`, @@ -366,6 +374,35 @@ timeout period. Defaults to 60 seconds.`, } } +func cloudfunctions2functionEventTriggerEventFiltersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute": { + Type: schema.TypeString, + Required: true, + Description: `'Required. The name of a CloudEvents attribute. +Currently, only a subset of attributes are supported for filtering. Use the 'gcloud eventarc providers describe' command to learn more about events and their attributes. +Do not filter for the 'type' attribute here, as this is already achieved by the resource's 'event_type' attribute.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: `Required. The value for the attribute. +If the operator field is set as 'match-path-pattern', this value can be a path pattern instead of an exact value.`, + }, + "operator": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. The operator used for matching the events with the value of +the filter. If not specified, only events that have an exact key-value +pair specified in the filter are matched. +The only allowed value is 'match-path-pattern'. 
+[See documentation on path patterns here](https://cloud.google.com/eventarc/docs/path-patterns)'`, + }, + }, + } +} + func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) @@ -1027,6 +1064,8 @@ func flattenCloudfunctions2functionEventTrigger(v interface{}, d *schema.Resourc flattenCloudfunctions2functionEventTriggerTriggerRegion(original["triggerRegion"], d, config) transformed["event_type"] = flattenCloudfunctions2functionEventTriggerEventType(original["eventType"], d, config) + transformed["event_filters"] = + flattenCloudfunctions2functionEventTriggerEventFilters(original["eventFilters"], d, config) transformed["pubsub_topic"] = flattenCloudfunctions2functionEventTriggerPubsubTopic(original["pubsubTopic"], d, config) transformed["service_account_email"] = @@ -1047,6 +1086,38 @@ func flattenCloudfunctions2functionEventTriggerEventType(v interface{}, d *schem return v } +func flattenCloudfunctions2functionEventTriggerEventFilters(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(cloudfunctions2functionEventTriggerEventFiltersSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "attribute": flattenCloudfunctions2functionEventTriggerEventFiltersAttribute(original["attribute"], d, config), + "value": flattenCloudfunctions2functionEventTriggerEventFiltersValue(original["value"], d, config), + "operator": flattenCloudfunctions2functionEventTriggerEventFiltersOperator(original["operator"], d, config), + }) + } + return transformed +} +func flattenCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1519,6 +1590,13 @@ func expandCloudfunctions2functionEventTrigger(v interface{}, d TerraformResourc transformed["eventType"] = transformedEventType } + transformedEventFilters, err := expandCloudfunctions2functionEventTriggerEventFilters(original["event_filters"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEventFilters); val.IsValid() && !isEmptyValue(val) { + transformed["eventFilters"] = transformedEventFilters + } + transformedPubsubTopic, err := expandCloudfunctions2functionEventTriggerPubsubTopic(original["pubsub_topic"], d, config) if err != nil { return nil, err @@ -1555,6 +1633,55 @@ func expandCloudfunctions2functionEventTriggerEventType(v interface{}, d Terrafo return v, nil } +func expandCloudfunctions2functionEventTriggerEventFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAttribute, err := expandCloudfunctions2functionEventTriggerEventFiltersAttribute(original["attribute"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAttribute); val.IsValid() && !isEmptyValue(val) { + transformed["attribute"] = transformedAttribute + } + + transformedValue, err := 
expandCloudfunctions2functionEventTriggerEventFiltersValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedOperator, err := expandCloudfunctions2functionEventTriggerEventFiltersOperator(original["operator"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !isEmptyValue(val) { + transformed["operator"] = transformedOperator + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function_generated_test.go index ea7b7a1b6c..dbbf0d303b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function_generated_test.go @@ -27,22 +27,22 @@ func TestAccCloudfunctions2function_cloudfunctions2BasicExample(t *testing.T) { t.Parallel() context := 
map[string]interface{}{ - "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", - "primary_resource_id": "terraform-test2", - "location": "us-central1", - "random_suffix": randString(t, 10), + "project": getTestProjectFromEnv(), + "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", + "location": "us-central1", + "random_suffix": randString(t, 10), } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccCloudfunctions2function_cloudfunctions2BasicExample(context), }, { - ResourceName: "google_cloudfunctions2_function.terraform-test2", + ResourceName: "google_cloudfunctions2_function.function", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket"}, @@ -53,29 +53,31 @@ func TestAccCloudfunctions2function_cloudfunctions2BasicExample(t *testing.T) { func testAccCloudfunctions2function_cloudfunctions2BasicExample(context map[string]interface{}) string { return Nprintf(` +# [START functions_v2_basic] +locals { + project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test2" { - provider = google-beta - name = 
"tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "tf-test-function-v2%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloHttp" # Set the entry point source { storage_source { bucket = google_storage_bucket.bucket.name @@ -90,6 +92,11 @@ resource "google_cloudfunctions2_function" "terraform-test2" { timeout_seconds = 60 } } + +output "function_uri" { + value = google_cloudfunctions2_function.function.service_config[0].uri +} +# [END functions_v2_basic] `, context) } @@ -97,7 +104,8 @@ func TestAccCloudfunctions2function_cloudfunctions2FullExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ - "zip_path": "./test-fixtures/cloudfunctions2/function-source.zip", + "project": getTestProjectFromEnv(), + "zip_path": "./test-fixtures/cloudfunctions2/function-source-pubsub.zip", "primary_resource_id": "terraform-test", "location": "us-central1", "random_suffix": randString(t, 10), @@ -105,14 +113,14 @@ func TestAccCloudfunctions2function_cloudfunctions2FullExample(t *testing.T) { vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccCloudfunctions2function_cloudfunctions2FullExample(context), }, { - ResourceName: "google_cloudfunctions2_function.terraform-test", + ResourceName: "google_cloudfunctions2_function.function", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket"}, @@ -123,40 +131,40 @@ func TestAccCloudfunctions2function_cloudfunctions2FullExample(t *testing.T) { func testAccCloudfunctions2function_cloudfunctions2FullExample(context 
map[string]interface{}) string { return Nprintf(` +# [START functions_v2_full] +locals { + project = "%{project}" # Google Cloud Platform Project ID +} + resource "google_service_account" "account" { - provider = google-beta - account_id = "test-service-account" + account_id = "sa%{random_suffix}" display_name = "Test Service Account" } -resource "google_pubsub_topic" "sub" { - provider = google-beta - name = "pubsub" +resource "google_pubsub_topic" "topic" { + name = "tf-test-functions2-topic%{random_suffix}" } resource "google_storage_bucket" "bucket" { - provider = google-beta - name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + name = "${local.project}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique location = "US" uniform_bucket_level_access = true } resource "google_storage_bucket_object" "object" { - provider = google-beta name = "function-source.zip" bucket = google_storage_bucket.bucket.name - source = "%{zip_path}" + source = "%{zip_path}" # Add path to the zipped function source code } -resource "google_cloudfunctions2_function" "terraform-test" { - provider = google-beta - name = "tf-test-test-function%{random_suffix}" +resource "google_cloudfunctions2_function" "function" { + name = "function%{random_suffix}" location = "us-central1" description = "a new function" build_config { runtime = "nodejs16" - entry_point = "helloHttp" + entry_point = "helloPubSub" # Set the entry point environment_variables = { BUILD_CONFIG_TEST = "build_test" } @@ -184,11 +192,288 @@ resource "google_cloudfunctions2_function" "terraform-test" { event_trigger { trigger_region = "us-central1" event_type = "google.cloud.pubsub.topic.v1.messagePublished" - pubsub_topic = google_pubsub_topic.sub.id + pubsub_topic = google_pubsub_topic.topic.id retry_policy = "RETRY_POLICY_RETRY" + } +} +# [END functions_v2_full] +`, context) +} + +func TestAccCloudfunctions2function_cloudfunctions2BasicGcsExample(t *testing.T) { + t.Parallel() + + 
context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "zip_path": "./test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip", + "primary_resource_id": "terraform-test", + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudfunctions2function_cloudfunctions2BasicGcsExample(context), + }, + { + ResourceName: "google_cloudfunctions2_function.function", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket"}, + }, + }, + }) +} + +func testAccCloudfunctions2function_cloudfunctions2BasicGcsExample(context map[string]interface{}) string { + return Nprintf(` +# [START functions_v2_basic_gcs] + +resource "google_storage_bucket" "source-bucket" { + name = "tf-test-gcf-source-bucket%{random_suffix}" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.source-bucket.name + source = "%{zip_path}" # Add path to the zipped function source code +} + +resource "google_storage_bucket" "trigger-bucket" { + name = "tf-test-gcf-trigger-bucket%{random_suffix}" + location = "us-central1" # The trigger must be in the same location as the bucket + uniform_bucket_level_access = true +} + +data "google_storage_project_service_account" "gcs_account" { +} + +# To use GCS CloudEvent triggers, the GCS service account requires the Pub/Sub Publisher(roles/pubsub.publisher) IAM role in the specified project. 
+# (See https://cloud.google.com/eventarc/docs/run/quickstart-storage#before-you-begin) +resource "google_project_iam_member" "gcs-pubsub-publishing" { + project = "%{project}" + role = "roles/pubsub.publisher" + member = "serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}" +} + +resource "google_service_account" "account" { + account_id = "sa%{random_suffix}" + display_name = "Test Service Account - used for both the cloud function and eventarc trigger in the test" +} + +# Permissions on the service account used by the function and Eventarc trigger +resource "google_project_iam_member" "invoking" { + project = "%{project}" + role = "roles/run.invoker" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_project_iam_member" "event-receiving" { + project = "%{project}" + role = "roles/eventarc.eventReceiver" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_project_iam_member" "artifactregistry-reader" { + project = "%{project}" + role = "roles/artifactregistry.reader" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_cloudfunctions2_function" "function" { + depends_on = [ + google_project_iam_member.event-receiving, + google_project_iam_member.artifactregistry-reader, + ] + name = "function%{random_suffix}" + location = "us-central1" + description = "a new function" + + build_config { + runtime = "nodejs12" + entry_point = "entryPoint" # Set the entry point in the code + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.source-bucket.name + object = google_storage_bucket_object.object.name + } + } + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "256M" + timeout_seconds = 60 + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + 
all_traffic_on_latest_revision = true service_account_email = google_service_account.account.email } + + event_trigger { + trigger_region = "us-central1" # The trigger must be in the same location as the bucket + event_type = "google.cloud.storage.object.v1.finalized" + retry_policy = "RETRY_POLICY_RETRY" + service_account_email = google_service_account.account.email + event_filters { + attribute = "bucket" + value = google_storage_bucket.trigger-bucket.name + } + } +} +# [END functions_v2_basic_gcs] +`, context) +} + +func TestAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": getTestProjectFromEnv(), + "zip_path": "./test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip", + "primary_resource_id": "terraform-test", + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample(context), + }, + { + ResourceName: "google_cloudfunctions2_function.function", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket"}, + }, + }, + }) +} + +func testAccCloudfunctions2function_cloudfunctions2BasicAuditlogsExample(context map[string]interface{}) string { + return Nprintf(` +# [START functions_v2_basic_auditlogs] +# This example follows the examples shown in this Google Cloud Community blog post +# https://medium.com/google-cloud/applying-a-path-pattern-when-filtering-in-eventarc-f06b937b4c34 +# and the docs: +# https://cloud.google.com/eventarc/docs/path-patterns + +resource "google_storage_bucket" "source-bucket" { + name = "tf-test-gcf-source-bucket%{random_suffix}" 
+ location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.source-bucket.name + source = "%{zip_path}" # Add path to the zipped function source code +} + +resource "google_service_account" "account" { + account_id = "tf-test-gcf-sa%{random_suffix}" + display_name = "Test Service Account - used for both the cloud function and eventarc trigger in the test" +} + +# Note: The right way of listening for Cloud Storage events is to use a Cloud Storage trigger. +# Here we use Audit Logs to monitor the bucket so path patterns can be used in the example of +# google_cloudfunctions2_function below (Audit Log events have path pattern support) +resource "google_storage_bucket" "audit-log-bucket" { + name = "tf-test-gcf-auditlog-bucket%{random_suffix}" + location = "us-central1" # The trigger must be in the same location as the bucket + uniform_bucket_level_access = true +} + +# Permissions on the service account used by the function and Eventarc trigger +resource "google_project_iam_member" "invoking" { + project = "%{project}" + role = "roles/run.invoker" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_project_iam_member" "event-receiving" { + project = "%{project}" + role = "roles/eventarc.eventReceiver" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_project_iam_member" "artifactregistry-reader" { + project = "%{project}" + role = "roles/artifactregistry.reader" + member = "serviceAccount:${google_service_account.account.email}" +} + +resource "google_cloudfunctions2_function" "function" { + depends_on = [ + google_project_iam_member.event-receiving, + google_project_iam_member.artifactregistry-reader, + ] + name = "tf-test-gcf-function%{random_suffix}" + location = "us-central1" + description = "a new function" + + build_config { + runtime = "nodejs12" + entry_point = 
"entryPoint" # Set the entry point in the code + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.source-bucket.name + object = google_storage_bucket_object.object.name + } + } + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "256M" + timeout_seconds = 60 + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" # The trigger must be in the same location as the bucket + event_type = "google.cloud.audit.log.v1.written" + retry_policy = "RETRY_POLICY_RETRY" + service_account_email = google_service_account.account.email + event_filters { + attribute = "serviceName" + value = "storage.googleapis.com" + } + event_filters { + attribute = "methodName" + value = "storage.objects.create" + } + event_filters { + attribute = "resourceName" + value = "/projects/_/buckets/${google_storage_bucket.audit-log-bucket.name}/objects/*.txt" # Path pattern selects all .txt files in the bucket + operator = "match-path-pattern" # This allows path patterns to be used in the value field + } + } } +# [END functions_v2_basic_auditlogs] `, context) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go index ea4e6520cb..53a7b7ac39 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go @@ -72,7 +72,7 @@ func parseCloudFunctionId(d *schema.ResourceData, config *Config) (*cloudFunctio }, nil } 
-// Differs from validateGcpName because Cloud Functions allow capital letters +// Differs from validateGCEName because Cloud Functions allow capital letters // at start/end func validateResourceCloudFunctionsFunctionName(v interface{}, k string) (ws []string, errors []error) { re := `^(?:[a-zA-Z](?:[-_a-zA-Z0-9]{0,61}[a-zA-Z0-9])?)$` diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go index 56e65d376c..1710fc751e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go @@ -133,7 +133,7 @@ func resourceComposerEnvironment() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the environment.`, }, "region": { @@ -434,12 +434,13 @@ func resourceComposerEnvironment() *schema.Resource { Description: `When enabled, IPs from public (non-RFC1918) ranges can be used for ip_allocation_policy.cluster_ipv4_cidr_block and ip_allocation_policy.service_ipv4_cidr_block.`, }, "cloud_composer_connection_subnetwork": { - Type: schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: composerPrivateEnvironmentConfig, - ForceNew: true, - Description: `When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. 
This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + Description: `When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, }, }, }, @@ -503,7 +504,7 @@ func resourceComposerEnvironment() *schema.Resource { Computed: true, AtLeastOneOf: composerConfigKeys, MaxItems: 1, - Description: `The encryption options for the Composer environment and its dependencies. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + Description: `The encryption options for the Composer environment and its dependencies.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "kms_key_name": { @@ -871,6 +872,21 @@ func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("config.0.software_config.0.scheduler_count") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.SchedulerCount = config.SoftwareConfig.SchedulerCount + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.schedulerCount", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.software_config.0.airflow_config_overrides") { patchObj := &composer.Environment{ Config: &composer.EnvironmentConfig{ diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment_test.go index a50e55da5d..890cd4a1f2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment_test.go @@ -138,8 +138,8 @@ func TestAccComposerEnvironment_update(t *testing.T) { }) } -// Checks private environment creation. -func TestAccComposerEnvironment_private(t *testing.T) { +// Checks private environment creation for composer 1 and 2. +func TestAccComposerEnvironmentComposer1_private(t *testing.T) { t.Parallel() envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, randInt(t)) @@ -152,7 +152,7 @@ func TestAccComposerEnvironment_private(t *testing.T) { CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccComposerEnvironment_private(envName, network, subnetwork), + Config: testAccComposerEnvironmentComposer1_private(envName, network, subnetwork), }, { ResourceName: "google_composer_environment.test", @@ -171,7 +171,46 @@ func TestAccComposerEnvironment_private(t *testing.T) { { PlanOnly: true, ExpectNonEmptyPlan: false, - Config: testAccComposerEnvironment_private(envName, network, subnetwork), + Config: testAccComposerEnvironmentComposer1_private(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer2_private(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, randInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, randInt(t)) + subnetwork := network + "-1" + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + 
Providers: testAccProviders, + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer2_private(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/environments/%s", getTestProjectFromEnv(), "us-central1", envName), + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer2_private(envName, network, subnetwork), Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), }, }, @@ -296,7 +335,42 @@ func TestAccComposerEnvironment_withWebServerConfig(t *testing.T) { }) } -func TestAccComposerEnvironment_withEncryptionConfig(t *testing.T) { +func TestAccComposerEnvironment_withEncryptionConfigComposer1(t *testing.T) { + t.Parallel() + + kms := BootstrapKMSKeyInLocation(t, "us-central1") + pid := getTestProjectFromEnv() + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, randInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, randInt(t)) + subnetwork := network + "-1" + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_encryptionCfg(pid, "1", "1", envName, kms.CryptoKey.Name, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible 
clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_encryptionCfg(pid, "1", "1", envName, kms.CryptoKey.Name, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withEncryptionConfigComposer2(t *testing.T) { t.Parallel() kms := BootstrapKMSKeyInLocation(t, "us-central1") @@ -311,7 +385,7 @@ func TestAccComposerEnvironment_withEncryptionConfig(t *testing.T) { CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccComposerEnvironment_encryptionCfg(pid, envName, kms.CryptoKey.Name, network, subnetwork), + Config: testAccComposerEnvironment_encryptionCfg(pid, "2", "2", envName, kms.CryptoKey.Name, network, subnetwork), }, { ResourceName: "google_composer_environment.test", @@ -324,7 +398,7 @@ func TestAccComposerEnvironment_withEncryptionConfig(t *testing.T) { { PlanOnly: true, ExpectNonEmptyPlan: false, - Config: testAccComposerEnvironment_encryptionCfg(pid, envName, kms.CryptoKey.Name, network, subnetwork), + Config: testAccComposerEnvironment_encryptionCfg(pid, "2", "2", envName, kms.CryptoKey.Name, network, subnetwork), Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), }, }, @@ -721,13 +795,21 @@ func TestAccComposerEnvironmentAirflow2_withSoftwareConfig(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccComposerEnvironmentUpdate_airflow2SoftwareCfg(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, // This is a terrible clean-up step in order to get destroy to succeed, // due to dangling firewall rules left by the 
Composer Environment blocking network deletion. // TODO: Remove this check if firewall rules bug gets fixed by Composer. { PlanOnly: true, ExpectNonEmptyPlan: false, - Config: testAccComposerEnvironment_airflow2SoftwareCfg(envName, network, subnetwork), + Config: testAccComposerEnvironmentUpdate_airflow2SoftwareCfg(envName, network, subnetwork), Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), }, }, @@ -801,74 +883,117 @@ func testAccComposerEnvironmentDestroyProducer(t *testing.T) func(s *terraform.S func testAccComposerEnvironment_basic(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - machine_type = "n1-standard-1" - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.0.0.0/16" - } - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + machine_type = "n1-standard-1" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } -func testAccComposerEnvironment_private(name, network, subnetwork 
string) string { +func testAccComposerEnvironmentComposer1_private(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.0.0.0/16" - } - } - private_environment_config { - enable_private_endpoint = true - enable_privately_used_public_ips = true - } - } + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + enable_ip_masq_agent = true + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + private_environment_config { + enable_private_endpoint = true + enable_privately_used_public_ips = true + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link - private_ip_google_access = true + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentComposer2_private(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = 
google_compute_subnetwork.test.self_link + enable_ip_masq_agent = true + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + software_config { + image_version = "composer-2-airflow-2" + } + private_environment_config { + enable_private_endpoint = true + enable_privately_used_public_ips = true + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true } `, name, network, subnetwork) } @@ -876,51 +1001,51 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_privateWithWebServerControl(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.56.0.0/14" - services_ipv4_cidr_block = "10.122.0.0/20" - } - } - private_environment_config { - enable_private_endpoint = false - web_server_ipv4_cidr_block = "172.30.240.0/24" - cloud_sql_ipv4_cidr_block = "10.32.0.0/12" - master_ipv4_cidr_block = "172.17.50.0/28" - } - web_server_network_access_control { - allowed_ip_range { - value = "192.168.0.1" - description = "my range1" - } - allowed_ip_range { - value = "0.0.0.0/0" - } - } - } + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + ip_allocation_policy { + use_ip_aliases = true + 
cluster_ipv4_cidr_block = "10.56.0.0/14" + services_ipv4_cidr_block = "10.122.0.0/20" + } + } + private_environment_config { + enable_private_endpoint = false + web_server_ipv4_cidr_block = "172.30.240.0/24" + cloud_sql_ipv4_cidr_block = "10.32.0.0/12" + master_ipv4_cidr_block = "172.17.50.0/28" + } + web_server_network_access_control { + allowed_ip_range { + value = "192.168.0.1" + description = "my range1" + } + allowed_ip_range { + value = "0.0.0.0/0" + } + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link - private_ip_google_access = true + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true } `, name, network, subnetwork) } @@ -928,51 +1053,51 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_privateWithWebServerControlUpdated(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.56.0.0/14" - services_ipv4_cidr_block = "10.122.0.0/20" - } - } - private_environment_config { - enable_private_endpoint = false - web_server_ipv4_cidr_block = "172.30.240.0/24" - cloud_sql_ipv4_cidr_block = "10.32.0.0/12" - master_ipv4_cidr_block = "172.17.50.0/28" - } - web_server_network_access_control { - allowed_ip_range { - value = 
"192.168.0.1" - description = "my range1" - } - allowed_ip_range { - value = "0.0.0.0/0" - } - } - } + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.56.0.0/14" + services_ipv4_cidr_block = "10.122.0.0/20" + } + } + private_environment_config { + enable_private_endpoint = false + web_server_ipv4_cidr_block = "172.30.240.0/24" + cloud_sql_ipv4_cidr_block = "10.32.0.0/12" + master_ipv4_cidr_block = "172.17.50.0/28" + } + web_server_network_access_control { + allowed_ip_range { + value = "192.168.0.1" + description = "my range1" + } + allowed_ip_range { + value = "0.0.0.0/0" + } + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link - private_ip_google_access = true + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true } `, name, network, subnetwork) } @@ -980,32 +1105,32 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_databaseCfg(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - database_config { - machine_type = "db-n1-standard-4" - } - } + name = "%s" + region = 
"us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + database_config { + machine_type = "db-n1-standard-4" + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } @@ -1013,32 +1138,32 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_databaseCfgUpdated(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - database_config { - machine_type = "db-n1-standard-8" - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + database_config { + machine_type = "db-n1-standard-8" + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = 
google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } @@ -1046,32 +1171,32 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_webServerCfg(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - web_server_config { - machine_type = "composer-n1-webserver-4" - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + web_server_config { + machine_type = "composer-n1-webserver-4" + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } @@ -1079,41 +1204,43 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_webServerCfgUpdated(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - 
web_server_config { - machine_type = "composer-n1-webserver-8" - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + web_server_config { + machine_type = "composer-n1-webserver-8" + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } -func testAccComposerEnvironment_encryptionCfg(pid, name, kmsKey, network, subnetwork string) string { +func testAccComposerEnvironment_encryptionCfg(pid, compVersion, airflowVersion, name, kmsKey, network, subnetwork string) string { return fmt.Sprintf(` data "google_project" "project" { project_id = "%s" } + + resource "google_project_iam_member" "kms-project-binding1" { project = data.google_project.project.project_id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" @@ -1154,8 +1281,12 @@ resource "google_composer_environment" "test" { node_config { network = google_compute_network.test.self_link subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" } + + software_config { + image_version = "composer-%s-airflow-%s" + } + encryption_config { kms_key_name = "%s" } @@ -1173,33 +1304,34 @@ resource "google_compute_subnetwork" "test" { region = "us-central1" network = google_compute_network.test.self_link } -`, pid, kmsKey, name, kmsKey, network, subnetwork) +`, + pid, kmsKey, name, compVersion, airflowVersion, kmsKey, 
network, subnetwork) } func testAccComposerEnvironment_maintenanceWindow(envName, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - maintenance_window { - start_time = "2019-08-01T01:00:00Z" - end_time = "2019-08-01T07:00:00Z" - recurrence = "FREQ=WEEKLY;BYDAY=TU,WE" - } - } + name = "%s" + region = "us-central1" + config { + maintenance_window { + start_time = "2019-08-01T01:00:00Z" + end_time = "2019-08-01T07:00:00Z" + recurrence = "FREQ=WEEKLY;BYDAY=TU,WE" + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, envName, network, subnetwork) @@ -1208,27 +1340,27 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_maintenanceWindowUpdate(envName, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - maintenance_window { - start_time = "2019-08-01T01:00:00Z" - end_time = "2019-08-01T07:00:00Z" - recurrence = "FREQ=DAILY" - } - } + name = "%s" + region = "us-central1" + config { + maintenance_window { + start_time = "2019-08-01T01:00:00Z" + end_time = "2019-08-01T07:00:00Z" + recurrence = "FREQ=DAILY" + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = 
"10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, envName, network, subnetwork) @@ -1237,64 +1369,64 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_composerV2(envName, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-east1" - - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - ip_allocation_policy { - cluster_ipv4_cidr_block = "10.0.0.0/16" - } - } - - software_config { - image_version = "composer-2-airflow-2" - } - - workloads_config { - scheduler { - cpu = 1.25 - memory_gb = 2.5 - storage_gb = 5.4 - count = 2 - } - web_server { - cpu = 1.75 - memory_gb = 3.0 - storage_gb = 4.4 - } - worker { - cpu = 0.5 - memory_gb = 2.0 - storage_gb = 3.4 - min_count = 2 - max_count = 5 - } - } - environment_size = "ENVIRONMENT_SIZE_MEDIUM" - private_environment_config { - enable_private_endpoint = true - cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" - master_ipv4_cidr_block = "172.16.194.0/23" - cloud_sql_ipv4_cidr_block = "10.3.224.0/20" - } - } + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + software_config { + image_version = "composer-2-airflow-2" + } + + workloads_config { + scheduler { + cpu = 1.25 + memory_gb = 2.5 + storage_gb = 5.4 + count = 2 + } + web_server { + cpu = 1.75 + memory_gb = 3.0 + storage_gb = 4.4 + } + worker { + cpu = 0.5 + memory_gb = 2.0 + storage_gb = 3.4 + min_count = 2 + max_count = 5 + } + } + environment_size = "ENVIRONMENT_SIZE_MEDIUM" + private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = 
"172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-east1" - network = google_compute_network.test.self_link - private_ip_google_access = true + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true } `, envName, network, subnetwork) @@ -1303,36 +1435,36 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_composerV2PrivateServiceConnect(envName, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - } - - software_config { - image_version = "composer-2-airflow-2" - } - private_environment_config { - cloud_composer_connection_subnetwork = google_compute_subnetwork.test.self_link - } - } + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-2-airflow-2" + } + private_environment_config { + cloud_composer_connection_subnetwork = google_compute_subnetwork.test.self_link + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link - private_ip_google_access = true + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + 
network = google_compute_network.test.self_link + private_ip_google_access = true } `, envName, network, subnetwork) @@ -1341,42 +1473,42 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_MasterAuthNetworks(compVersion, airflowVersion, envName, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" + name = "%s" + region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - } + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } - software_config { - image_version = "composer-%s-airflow-%s" - } + software_config { + image_version = "composer-%s-airflow-%s" + } - master_authorized_networks_config { - enabled = true - cidr_blocks { - display_name = "foo" - cidr_block = "8.8.8.8/32" - } - cidr_blocks { - cidr_block = "8.8.8.0/24" - } - } - } + master_authorized_networks_config { + enabled = true + cidr_blocks { + display_name = "foo" + cidr_block = "8.8.8.8/32" + } + cidr_blocks { + cidr_block = "8.8.8.0/24" + } + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, envName, compVersion, airflowVersion, network, subnetwork) @@ -1385,39 +1517,39 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_MasterAuthNetworksUpdate(compVersion, airflowVersion, envName, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - 
name = "%s" - region = "us-central1" + name = "%s" + region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - } + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } - software_config { - image_version = "composer-%s-airflow-%s" - } + software_config { + image_version = "composer-%s-airflow-%s" + } - master_authorized_networks_config { - enabled = true - cidr_blocks { - display_name = "foo_update" - cidr_block = "9.9.9.8/30" - } - } - } + master_authorized_networks_config { + enabled = true + cidr_blocks { + display_name = "foo_update" + cidr_block = "9.9.9.8/30" + } + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, envName, compVersion, airflowVersion, network, subnetwork) @@ -1426,64 +1558,64 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_update(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - - config { - node_count = 4 - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - machine_type = "n1-standard-1" - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.0.0.0/16" - } - } + name = "%s" + region = "us-central1" + + config { + node_count = 4 + node_config { + network = google_compute_network.test.self_link + subnetwork = 
google_compute_subnetwork.test.self_link + zone = "us-central1-a" + machine_type = "n1-standard-1" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } - software_config { - image_version = "composer-1-airflow-1" + software_config { + image_version = "composer-1-airflow-1" - airflow_config_overrides = { - core-load_example = "True" - } + airflow_config_overrides = { + core-load_example = "True" + } - pypi_packages = { - numpy = "" - } + pypi_packages = { + numpy = "" + } - env_variables = { - FOO = "bar" - } - } - web_server_config { - machine_type = "composer-n1-webserver-4" - } + env_variables = { + FOO = "bar" + } + } + web_server_config { + machine_type = "composer-n1-webserver-4" + } - database_config { - machine_type = "db-n1-standard-4" - } - } + database_config { + machine_type = "db-n1-standard-4" + } + } - labels = { - foo = "bar" - anotherlabel = "boo" - } + labels = { + foo = "bar" + anotherlabel = "boo" + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } @@ -1491,64 +1623,64 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_updateComposerV2(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-east1" - - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - ip_allocation_policy { - 
cluster_ipv4_cidr_block = "10.0.0.0/16" - } - } - - software_config { - image_version = "composer-2-airflow-2" - } - - workloads_config { - scheduler { - cpu = 2.25 - memory_gb = 3.5 - storage_gb = 6.4 - count = 3 - } - web_server { - cpu = 2.75 - memory_gb = 4.0 - storage_gb = 5.4 - } - worker { - cpu = 1.5 - memory_gb = 3.0 - storage_gb = 4.4 - min_count = 3 - max_count = 6 - } - } - environment_size = "ENVIRONMENT_SIZE_LARGE" - private_environment_config { - enable_private_endpoint = true - cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" - master_ipv4_cidr_block = "172.16.194.0/23" - cloud_sql_ipv4_cidr_block = "10.3.224.0/20" - } - } + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + software_config { + image_version = "composer-2-airflow-2" + } + + workloads_config { + scheduler { + cpu = 2.25 + memory_gb = 3.5 + storage_gb = 6.4 + count = 3 + } + web_server { + cpu = 2.75 + memory_gb = 4.0 + storage_gb = 5.4 + } + worker { + cpu = 1.5 + memory_gb = 3.0 + storage_gb = 4.4 + min_count = 3 + max_count = 6 + } + } + environment_size = "ENVIRONMENT_SIZE_LARGE" + private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = "172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-east1" - network = google_compute_network.test.self_link - private_ip_google_access = true + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = 
true } `, name, network, subnetwork) } @@ -1558,47 +1690,46 @@ func testAccComposerEnvironment_nodeCfg(environment, network, subnetwork, servic data "google_project" "project" {} resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - - service_account = google_service_account.test.name - max_pods_per_node = 33 - enable_ip_masq_agent = true - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.0.0.0/16" - } - } - } - depends_on = [google_project_iam_member.composer-worker] + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + + service_account = google_service_account.test.name + max_pods_per_node = 33 + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + } + depends_on = [google_project_iam_member.composer-worker] } resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } resource "google_service_account" "test" { - account_id = "%s" - display_name = "Test Service Account for Composer Environment" + account_id = "%s" + display_name = "Test Service Account for Composer Environment" } resource "google_project_iam_member" "composer-worker" { - project = data.google_project.project.project_id - role = "roles/composer.worker" - member = "serviceAccount:${google_service_account.test.email}" + project 
= data.google_project.project.project_id + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" } `, environment, network, subnetwork, serviceAccount) } @@ -1606,33 +1737,33 @@ resource "google_project_iam_member" "composer-worker" { func testAccComposerEnvironment_softwareCfg(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - software_config { - image_version = "composer-1-airflow-1" - python_version = "3" - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-1" + python_version = "3" + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } @@ -1640,34 +1771,34 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_updateOnlyFields(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = 
google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - software_config { - pypi_packages = { - numpy = "" - } - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + pypi_packages = { + numpy = "" + } + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } @@ -1675,33 +1806,67 @@ resource "google_compute_subnetwork" "test" { func testAccComposerEnvironment_airflow2SoftwareCfg(name, network, subnetwork string) string { return fmt.Sprintf(` resource "google_composer_environment" "test" { - name = "%s" - region = "us-central1" - config { - node_config { - network = google_compute_network.test.self_link - subnetwork = google_compute_subnetwork.test.self_link - zone = "us-central1-a" - } - software_config { - image_version = "composer-1-airflow-2" - scheduler_count = 2 - } - } + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + scheduler_count = 2 + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + 
auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentUpdate_airflow2SoftwareCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + scheduler_count = 3 + } + } } // use a separate network to avoid conflicts with other tests running in parallel // that use the default network/subnet resource "google_compute_network" "test" { - name = "%s" - auto_create_subnetworks = false + name = "%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "test" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = google_compute_network.test.self_link + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link } `, name, network, subnetwork) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_address_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_address_generated_test.go index f47e4894f3..289631ea1a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_address_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_address_generated_test.go @@ -206,7 +206,7 @@ resource "google_compute_address" "static" { } data "google_compute_image" "debian_image" { - family = "debian-9" + family = "debian-11" project 
= "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_attached_disk_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_attached_disk_test.go index fd10a9e7dc..f4d9128b97 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_attached_disk_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_attached_disk_test.go @@ -204,7 +204,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -234,7 +234,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -265,7 +265,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go index 83b234b4f0..faae599ac5 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go @@ -380,7 +380,7 @@ to include directives regarding slower scale down, as described above.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. The name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the first character must be a lowercase letter, and all following diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_generated_test.go index 31084eb21b..bc974c73c4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_generated_test.go @@ -120,7 +120,7 @@ resource "google_compute_instance_group_manager" "default" { data "google_compute_image" "debian_9" { provider = google-beta - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -216,7 +216,7 @@ resource "google_compute_instance_group_manager" "foobar" { } data "google_compute_image" "debian_9" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_test.go index 5ffc82520f..02ed256217 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler_test.go @@ -168,7 +168,7 @@ func TestAccComputeAutoscaler_scaleInControlFixed(t *testing.T) { func testAccComputeAutoscaler_scaffolding(itName, tpName, igmName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go index ccb5739785..01e23712d1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go @@ -68,6 +68,21 @@ last character, which cannot be a dash.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "bypass_cache_on_request_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.`, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Optional: true, + Description: `The header field name to match on when bypassing cache. Values are case-insensitive.`, + }, + }, + }, + }, "cache_key_policy": { Type: schema.TypeList, Optional: true, @@ -154,6 +169,11 @@ can be specified as values, and you cannot specify a status code more than once. 
}, }, }, + "request_coalescing": { + Type: schema.TypeBool, + Optional: true, + Description: `If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.`, + }, "serve_while_stale": { Type: schema.TypeInt, Computed: true, @@ -607,6 +627,10 @@ func flattenComputeBackendBucketCdnPolicy(v interface{}, d *schema.ResourceData, flattenComputeBackendBucketCdnPolicyCacheMode(original["cacheMode"], d, config) transformed["serve_while_stale"] = flattenComputeBackendBucketCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) + transformed["request_coalescing"] = + flattenComputeBackendBucketCdnPolicyRequestCoalescing(original["requestCoalescing"], d, config) + transformed["bypass_cache_on_request_headers"] = + flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypassCacheOnRequestHeaders"], d, config) return []interface{}{transformed} } func flattenComputeBackendBucketCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -778,6 +802,32 @@ func flattenComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d *schem return v // let terraform core handle it otherwise } +func flattenComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "header_name": flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["headerName"], d, config), + }) + } + return transformed +} 
+func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenComputeBackendBucketEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -878,6 +928,20 @@ func expandComputeBackendBucketCdnPolicy(v interface{}, d TerraformResourceData, transformed["serveWhileStale"] = transformedServeWhileStale } + transformedRequestCoalescing, err := expandComputeBackendBucketCdnPolicyRequestCoalescing(original["request_coalescing"], d, config) + if err != nil { + return nil, err + } else { + transformed["requestCoalescing"] = transformedRequestCoalescing + } + + transformedBypassCacheOnRequestHeaders, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypass_cache_on_request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBypassCacheOnRequestHeaders); val.IsValid() && !isEmptyValue(val) { + transformed["bypassCacheOnRequestHeaders"] = transformedBypassCacheOnRequestHeaders + } + return transformed, nil } @@ -980,6 +1044,36 @@ func expandComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d Terrafo return v, nil } +func expandComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHeaderName, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["header_name"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + transformed["headerName"] = transformedHeaderName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandComputeBackendBucketEdgeSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket_generated_test.go index eacac3247d..f152d58f3f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket_generated_test.go @@ -251,6 +251,224 @@ resource "google_storage_bucket" "image_bucket" { `, context) } +func TestAccComputeBackendBucket_externalCdnLbWithBackendBucketExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendBucket_externalCdnLbWithBackendBucketExample(context), + }, + { + ResourceName: "google_compute_backend_bucket.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeBackendBucket_externalCdnLbWithBackendBucketExample(context map[string]interface{}) string { + return Nprintf(` +# CDN load balancer with Cloud bucket as backend + +# Cloud Storage bucket 
+resource "google_storage_bucket" "default" { + name = "tf-test-my-bucket%{random_suffix}" + location = "us-east1" + uniform_bucket_level_access = true + storage_class = "STANDARD" + // delete bucket and contents on destroy. + force_destroy = true + // Assign specialty files + website { + main_page_suffix = "index.html" + not_found_page = "404.html" + } +} + + +# make bucket public +resource "google_storage_bucket_iam_member" "default" { + bucket = google_storage_bucket.default.name + role = "roles/storage.objectViewer" + member = "allUsers" +} + +resource "google_storage_bucket_object" "index_page" { + name = "tf-test-index-page%{random_suffix}" + bucket = google_storage_bucket.default.name + content = <<-EOT + +
<html><body>
<h1>Congratulations on setting up Google Cloud CDN with Storage backend!</h1>
</body></html>
+ + EOT +} + +resource "google_storage_bucket_object" "error_page" { + name = "tf-test-404-page%{random_suffix}" + bucket = google_storage_bucket.default.name + content = <<-EOT + +
<html><body>
<h1>404 Error: Object you are looking for is no longer available!</h1>
</body></html>
+ + EOT +} + +# image object for testing, try to access http:///test.jpg +resource "google_storage_bucket_object" "test_image" { + name = "tf-test-test-object%{random_suffix}" + # Uncomment and add valid path to an object. + # source = "/path/to/an/object" + # content_type = "image/jpeg" + + # Delete after uncommenting above source and content_type attributes + content = "Data as string to be uploaded" + content_type = "text/plain" + + bucket = google_storage_bucket.default.name +} + +# reserve IP address +resource "google_compute_global_address" "default" { + name = "tf-test-example-ip%{random_suffix}" +} + +# forwarding rule +resource "google_compute_global_forwarding_rule" "default" { + name = "tf-test-http-lb-forwarding-rule%{random_suffix}" + ip_protocol = "TCP" + load_balancing_scheme = "EXTERNAL" + port_range = "80" + target = google_compute_target_http_proxy.default.id + ip_address = google_compute_global_address.default.id +} + +# http proxy +resource "google_compute_target_http_proxy" "default" { + name = "tf-test-http-lb-proxy%{random_suffix}" + url_map = google_compute_url_map.default.id +} + +# url map +resource "google_compute_url_map" "default" { + name = "tf-test-http-lb%{random_suffix}" + default_service = google_compute_backend_bucket.default.id +} + +# backend bucket with CDN policy with default ttl settings +resource "google_compute_backend_bucket" "default" { + name = "tf-test-cat-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.default.name + enable_cdn = true + cdn_policy { + cache_mode = "CACHE_ALL_STATIC" + client_ttl = 3600 + default_ttl = 3600 + max_ttl = 86400 + negative_caching = true + serve_while_stale = 86400 + } +} +`, context) +} + +func TestAccComputeBackendBucket_backendBucketBypassCacheExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendBucket_backendBucketBypassCacheExample(context), + }, + { + ResourceName: "google_compute_backend_bucket.image_backend", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeBackendBucket_backendBucketBypassCacheExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true + cdn_policy { + bypass_cache_on_request_headers { + header_name = "test" + } + } +} + +resource "google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} +`, context) +} + +func TestAccComputeBackendBucket_backendBucketCoalescingExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendBucket_backendBucketCoalescingExample(context), + }, + { + ResourceName: "google_compute_backend_bucket.image_backend", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeBackendBucket_backendBucketCoalescingExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_backend_bucket" "image_backend" { + name = "tf-test-image-backend-bucket%{random_suffix}" + description = "Contains beautiful images" + bucket_name = google_storage_bucket.image_bucket.name + enable_cdn = true + cdn_policy { + request_coalescing = true + } +} + +resource 
"google_storage_bucket" "image_bucket" { + name = "tf-test-image-store-bucket%{random_suffix}" + location = "EU" +} +`, context) +} + func testAccCheckComputeBackendBucketDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go index 7148838b7a..28dbfd538a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go @@ -995,7 +995,10 @@ partial URL.`, For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) -and CONNECTION (for TCP/SSL). Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, +and CONNECTION (for TCP/SSL). + +See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) +for an explanation of load balancing modes. 
Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, Default: "UTILIZATION", }, "capacity_scaler": { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service_test.go index e3665376af..7a3f54be1d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service_test.go @@ -948,7 +948,7 @@ func testAccComputeBackendService_withBackend( serviceName, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1005,7 +1005,7 @@ func testAccComputeBackendService_withBackendAndMaxUtilization( serviceName, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1063,7 +1063,7 @@ func testAccComputeBackendService_withBackendAndIAP( serviceName, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1270,7 +1270,7 @@ func testAccComputeBackendService_withMaxConnections( serviceName, igName, itName, checkName string, maxConnections int64) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1327,7 +1327,7 @@ func testAccComputeBackendService_withMaxConnectionsPerInstance( serviceName, igName, itName, checkName string, maxConnectionsPerInstance int64) string { return fmt.Sprintf(` data 
"google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1399,7 +1399,7 @@ resource "google_compute_backend_service" "lipsum" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1477,7 +1477,7 @@ resource "google_compute_backend_service" "lipsum" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1625,7 +1625,7 @@ resource "google_compute_url_map" "default" { } data "google_compute_image" "debian_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go index ea73b72706..a93db04637 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go @@ -140,9 +140,15 @@ func diskImageEquals(oldImageName, newImageName string) bool { func diskImageFamilyEquals(imageName, familyName string) bool { // Handles the case when the image name includes the family name - // e.g. image name: debian-9-drawfork-v20180109, family name: debian-9 - if strings.Contains(imageName, familyName) { - return true + // e.g. 
image name: debian-11-bullseye-v20220719, family name: debian-11 + // We have to check for arm64 because of cases like: + // image name: opensuse-leap-15-4-v20220713-arm64, family name: opensuse-leap (should not suppress) + if strings.Contains(imageName, strings.TrimSuffix(familyName, "-arm64")) { + if strings.Contains(imageName, "-arm64") { + return strings.HasSuffix(familyName, "-arm64") + } else { + return !strings.HasSuffix(familyName, "-arm64") + } } if suppressCanonicalFamilyDiff(imageName, familyName) { @@ -167,8 +173,13 @@ func diskImageFamilyEquals(imageName, familyName string) bool { // e.g. image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts func suppressCanonicalFamilyDiff(imageName, familyName string) bool { parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) - if len(parts) == 3 { - f := fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) + if len(parts) == 4 { + var f string + if parts[3] == "" { + f = fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) + } else { + f = fmt.Sprintf("ubuntu-%s%s-lts-%s", parts[1], parts[2], parts[3]) + } if f == familyName { return true } @@ -378,6 +389,7 @@ the supported values for the caller's project.`, }, "provisioned_iops": { Type: schema.TypeInt, + Computed: true, Optional: true, ForceNew: true, Description: `Indicates how many IOPS must be provisioned for the disk.`, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_generated_test.go index 19e3032a44..3cffc3ec8d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_generated_test.go @@ -54,7 +54,7 @@ resource "google_compute_disk" "default" { name = "tf-test-test-disk%{random_suffix}" type = 
"pd-ssd" zone = "us-central1-a" - image = "debian-9-stretch-v20200805" + image = "debian-11-bullseye-v20220719" labels = { environment = "dev" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_generated_test.go index e2984a915e..b3095f98b6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_generated_test.go @@ -78,7 +78,7 @@ resource "google_compute_resource_policy" "policy" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_test.go index e9fe082caf..6d5fe05e93 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_resource_policy_attachment_test.go @@ -43,7 +43,7 @@ func TestAccComputeDiskResourcePolicyAttachment_update(t *testing.T) { func testAccComputeDiskResourcePolicyAttachment_basic(diskName, policyName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_test.go index d2e08333d7..794b7ab00e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk_test.go @@ -171,12 +171,92 @@ func TestDiskImageDiffSuppress(t *testing.T) { New: "different-cloud/debian-8", ExpectDiffSuppress: false, }, + // arm images + "matching image opensuse arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-leap-15-4-v20220713-arm64", + New: "opensuse-leap-arm64", + ExpectDiffSuppress: true, + }, + "matching image sles arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-15-sp4-v20220713-arm64", + New: "sles-15-arm64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-arm64-v20220712", + New: "ubuntu-1804-lts-arm64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu-minimal arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-arm64-v20220713", + New: "ubuntu-minimal-2004-lts-arm64", + ExpectDiffSuppress: true, + }, + "matching image debian arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-arm64-v20220719", + New: "debian-11-arm64", + ExpectDiffSuppress: true, + }, + "different architecture image opensuse arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-leap-15-4-v20220713-arm64", + New: "opensuse-leap", + 
ExpectDiffSuppress: false, + }, + "different architecture image sles arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-15-sp4-v20220713-arm64", + New: "sles-15", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-arm64-v20220712", + New: "ubuntu-1804-lts", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu-minimal arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-arm64-v20220713", + New: "ubuntu-minimal-2004-lts", + ExpectDiffSuppress: false, + }, + "different architecture image debian arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-arm64-v20220719", + New: "debian-11", + ExpectDiffSuppress: false, + }, + "different architecture image opensuse arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-leap-15-2-v20200702", + New: "opensuse-leap-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image sles arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-15-sp4-v20220722-x86-64", + New: "sles-15-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-v20220712", + New: "ubuntu-1804-lts-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu-minimal arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-v20220713", + New: "ubuntu-minimal-2004-lts-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image debian arm64 
family": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-v20220719", + New: "debian-11-arm64", + ExpectDiffSuppress: false, + }, } for tn, tc := range cases { - if diskImageDiffSuppress("image", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Errorf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + if diskImageDiffSuppress("image", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("%q => %q expect DiffSuppress to return %t", tc.Old, tc.New, tc.ExpectDiffSuppress) + } + }) } } @@ -185,7 +265,7 @@ func TestAccComputeDisk_imageDiffSuppressPublicVendorsFamilyNames(t *testing.T) t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) @@ -437,6 +517,27 @@ func TestAccComputeDisk_deleteDetachIGM(t *testing.T) { }) } +func TestAccComputeDisk_pdExtremeImplicitProvisionedIops(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_pdExtremeImplicitProvisionedIops(diskName), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccComputeDisk_resourcePolicies(t *testing.T) { t.Parallel() @@ -530,7 +631,7 @@ func testAccCheckEncryptionKey(t *testing.T, n string, disk *compute.Disk) resou func testAccComputeDisk_basic(diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -550,7 +651,7 @@ resource "google_compute_disk" "foobar" { func 
testAccComputeDisk_timeout(diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -561,7 +662,7 @@ resource "google_compute_disk" "foobar" { zone = "us-central1-a" timeouts { - create = "1s" + create = ".5s" } } `, diskName) @@ -570,7 +671,7 @@ resource "google_compute_disk" "foobar" { func testAccComputeDisk_updated(diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -591,7 +692,7 @@ resource "google_compute_disk" "foobar" { func testAccComputeDisk_fromSnapshot(projectName, firstDiskName, snapshotName, diskName, ref_selector string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -624,7 +725,7 @@ resource "google_compute_disk" "seconddisk" { func testAccComputeDisk_encryption(diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -648,7 +749,7 @@ data "google_project" "project" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -677,7 +778,7 @@ resource "google_compute_disk" "foobar" { func testAccComputeDisk_deleteDetach(instanceName, diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -714,7 +815,7 @@ resource "google_compute_instance" "bar" { func testAccComputeDisk_deleteDetachIGM(diskName, mgrName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -766,10 +867,20 @@ resource "google_compute_instance_group_manager" "manager" { `, diskName, mgrName) } +func 
testAccComputeDisk_pdExtremeImplicitProvisionedIops(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + type = "pd-extreme" + size = 1 +} +`, diskName) +} + func testAccComputeDisk_resourcePolicies(diskName, policyName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -800,7 +911,7 @@ resource "google_compute_disk" "foobar" { func testAccComputeDisk_multiWriter(instance string, diskName string, enableMultiwriter bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_external_vpn_gateway_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_external_vpn_gateway_generated_test.go index f7e6d5cc87..668e02c775 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_external_vpn_gateway_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_external_vpn_gateway_generated_test.go @@ -67,7 +67,7 @@ resource "google_compute_external_vpn_gateway" "external_gateway" { } resource "google_compute_network" "network" { - name = "network%{random_suffix}" + name = "tf-test-network-1%{random_suffix}" routing_mode = "GLOBAL" auto_create_subnetworks = false } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go index f382914f66..2eca41d907 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go @@ -150,7 +150,7 @@ func resourceComputeFirewall() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match @@ -311,7 +311,7 @@ one of 'source_ranges', 'source_tags' or 'source_service_accounts' is required.` Type: schema.TypeString, }, Set: schema.HashString, - ConflictsWith: []string{"destination_ranges", "source_service_accounts", "target_service_accounts"}, + ConflictsWith: []string{"source_service_accounts", "destination_ranges", "target_service_accounts"}, }, "target_service_accounts": { Type: schema.TypeSet, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go index f38b40740a..9101c16817 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go @@ -120,12 +120,12 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ Description: dcl.String(d.Get("description").(string)), } - id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -142,7 +142,7 @@ func 
resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ } else { client.Config.BasePath = bp } - res, err := client.ApplyFirewallPolicy(context.Background(), obj, createDirective...) + res, err := client.ApplyFirewallPolicy(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -155,10 +155,11 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + // ID has a server-generated value, set again after creation. + + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go index 8779c92a88..5a844026c5 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go @@ -84,12 +84,12 @@ func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta Name: dcl.String(d.Get("name").(string)), } - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := 
generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -106,7 +106,7 @@ func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta } else { client.Config.BasePath = bp } - res, err := client.ApplyFirewallPolicyAssociation(context.Background(), obj, createDirective...) + res, err := client.ApplyFirewallPolicyAssociation(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go index 034ead910a..ba4261510e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go @@ -190,12 +190,12 @@ func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interf TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), } - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -212,7 +212,7 @@ func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interf } else { client.Config.BasePath = bp } - res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, createDirective...) + res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go index 13e31e051a..730580af76 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go @@ -187,7 +187,7 @@ func resourceComputeForwardingRule() *schema.Resource { Optional: true, ForceNew: true, Description: "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, }, "subnetwork": { @@ -305,7 +305,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -322,7 +322,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ } else { client.Config.BasePath = bp } - res, err := client.ApplyForwardingRule(context.Background(), obj, createDirective...) 
+ res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule_generated_test.go index fc925bba4b..38d48a1b35 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule_generated_test.go @@ -813,7 +813,7 @@ resource "google_compute_region_backend_service" "default" { data "google_compute_image" "debian_image" { provider = google-beta - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1032,7 +1032,7 @@ resource "google_compute_region_backend_service" "default" { data "google_compute_image" "debian_image" { provider = google-beta - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go index ad98284c62..33a65cbf6d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go @@ -232,12 +232,12 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") + id, err := 
obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -254,7 +254,7 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte } else { client.Config.BasePath = bp } - res, err := client.ApplyForwardingRule(context.Background(), obj, createDirective...) + res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_generated_test.go index 49394a62d7..ed39333ce7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_generated_test.go @@ -171,7 +171,7 @@ resource "google_compute_instance_group_manager" "default" { zone = "us-central1-c" named_port { name = "tcp" - port = 110 + port = 80 } version { instance_template = google_compute_instance_template.default.id @@ -551,7 +551,7 @@ resource "google_compute_backend_service" "default" { data "google_compute_image" "debian_image" { provider = google-beta - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -695,11 +695,27 @@ func TestAccComputeGlobalForwardingRule_globalForwardingRuleHybridExample(t *tes func testAccComputeGlobalForwardingRule_globalForwardingRuleHybridExample(context map[string]interface{}) string { return Nprintf(` // Roughly mirrors 
https://cloud.google.com/load-balancing/docs/https/setting-up-ext-https-hybrid +variable "subnetwork_cidr" { + default = "10.0.0.0/24" +} resource "google_compute_network" "default" { name = "tf-test-my-network%{random_suffix}" } +resource "google_compute_network" "internal" { + name = "tf-test-my-internal-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "internal"{ + name = "tf-test-my-subnetwork%{random_suffix}" + network = google_compute_network.internal.id + ip_cidr_range = var.subnetwork_cidr + region = "us-central1" + private_ip_google_access= true +} + // Zonal NEG with GCE_VM_IP_PORT resource "google_compute_network_endpoint_group" "default" { name = "tf-test-default-neg%{random_suffix}" @@ -709,6 +725,15 @@ resource "google_compute_network_endpoint_group" "default" { network_endpoint_type = "GCE_VM_IP_PORT" } +// Zonal NEG with GCE_VM_IP +resource "google_compute_network_endpoint_group" "internal" { + name = "tf-test-internal-neg%{random_suffix}" + network = google_compute_network.internal.id + subnetwork = google_compute_subnetwork.internal.id + zone = "us-central1-a" + network_endpoint_type = "GCE_VM_IP" +} + // Hybrid connectivity NEG resource "google_compute_network_endpoint_group" "hybrid" { name = "tf-test-hybrid-neg%{random_suffix}" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_test.go index b65c307d84..89502673f4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule_test.go @@ -451,7 +451,7 @@ resource "google_compute_url_map" "default" { } data "google_compute_image" "debian_image" { - 
family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -570,7 +570,7 @@ resource "google_compute_url_map" "default" { } data "google_compute_image" "debian_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go index 65ba650432..847400db51 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go @@ -43,7 +43,7 @@ func resourceComputeGlobalNetworkEndpointGroup() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go index 2b38e76a4a..bd24bc33fc 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go @@ -43,7 +43,7 @@ func resourceComputeHaVpnGateway() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. 
Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_image_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_image_test.go index 5cd11a6a70..ce4792e554 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_image_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_image_test.go @@ -238,7 +238,7 @@ func testAccCheckComputeImageResolution(t *testing.T, n string) resource.TestChe family := rs.Primary.Attributes["family"] link := rs.Primary.Attributes["self_link"] - latestDebian, err := config.NewComputeClient(config.userAgent).Images.GetFromFamily("debian-cloud", "debian-9").Do() + latestDebian, err := config.NewComputeClient(config.userAgent).Images.GetFromFamily("debian-cloud", "debian-11").Do() if err != nil { return fmt.Errorf("Error retrieving latest debian: %s", err) } @@ -308,7 +308,7 @@ func testAccCheckComputeImageHasSourceType(image *compute.Image) resource.TestCh func testAccComputeImage_resolving(name, family string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -346,7 +346,7 @@ resource "google_compute_image" "foobar" { func testAccComputeImage_license(name string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -366,7 +366,7 @@ resource "google_compute_image" "foobar" { empty-label = "" } licenses = [ - "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/licenses/debian-9-stretch", + 
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/licenses/debian-11-bullseye", ] } `, name, name) @@ -392,7 +392,7 @@ resource "google_compute_image" "foobar" { func testAccComputeImage_basedondisk(diskName, imageName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -412,7 +412,7 @@ resource "google_compute_image" "foobar" { func testAccComputeImage_sourceImage(imageName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -426,7 +426,7 @@ resource "google_compute_image" "foobar" { func testAccComputeImage_sourceSnapshot(diskName, snapshotName, imageName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go index c02fefa540..a571191114 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go @@ -46,6 +46,7 @@ var ( "scheduling.0.node_affinities", "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", + "scheduling.0.instance_termination_action", } shieldedInstanceConfigKeys = []string{ @@ -613,11 +614,13 @@ func resourceComputeInstance() *schema.Resource { DiffSuppressFunc: emptyOrDefaultStringSuppress(""), Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, }, + "min_node_cpus": { Type: schema.TypeInt, Optional: true, AtLeastOneOf: 
schedulingKeys, }, + "provisioning_model": { Type: schema.TypeString, Optional: true, @@ -626,6 +629,13 @@ func resourceComputeInstance() *schema.Resource { AtLeastOneOf: schedulingKeys, Description: `Whether the instance is spot. If this is set as SPOT.`, }, + + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: schedulingKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, }, }, }, @@ -2208,13 +2218,15 @@ func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([ // issues when a count of `0` guest accelerators is desired. This may occur when // guest_accelerator support is controlled via a module variable. E.g.: // -// guest_accelerators { -// count = "${var.enable_gpu ? var.gpu_count : 0}" -// ... -// } +// guest_accelerators { +// count = "${var.enable_gpu ? var.gpu_count : 0}" +// ... +// } + // After reconciling the desired and actual state, we would otherwise see a -// perpetual resembling: -// [] != [{"count":0, "type": "nvidia-tesla-k80"}] +// perpetual diff resembling: +// +// [] != [{"count":0, "type": "nvidia-tesla-k80"}] func suppressEmptyGuestAcceleratorDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { oldi, newi := d.GetChange("guest_accelerator") diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_from_template_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_from_template_test.go index 73e2e357bc..d2f60e5fde 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_from_template_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_from_template_test.go @@ -239,7 +239,7 @@ func testAccCheckComputeInstanceFromTemplateDestroyProducer(t *testing.T) func(s func 
testAccComputeInstanceFromTemplate_basic(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -313,7 +313,7 @@ resource "google_compute_instance_from_template" "foobar" { func testAccComputeInstanceFromTemplate_overrideBootDisk(templateDisk, overrideDisk, template, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -378,7 +378,7 @@ resource "google_compute_instance_from_template" "inst" { func testAccComputeInstanceFromTemplate_overrideAttachedDisk(templateDisk, overrideDisk, template, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -416,7 +416,7 @@ resource "google_compute_instance_template" "template" { } disk { - source_image = "debian-cloud/debian-9" + source_image = "debian-cloud/debian-11" auto_delete = true boot = false } @@ -443,7 +443,7 @@ resource "google_compute_instance_from_template" "inst" { func testAccComputeInstanceFromTemplate_overrideScratchDisk(templateDisk, overrideDisk, template, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -505,7 +505,7 @@ resource "google_compute_instance_from_template" "inst" { func testAccComputeInstanceFromTemplate_overrideScheduling(templateDisk, template, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -556,7 +556,7 @@ func testAccComputeInstanceFromTemplate_012_removableFieldsTpl(template string) return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -633,7 +633,7 @@ 
resource "google_compute_instance_from_template" "inst" { func testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go index 5ee889c995..5f86a17e60 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go @@ -27,7 +27,6 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Update: schema.DefaultTimeout(15 * time.Minute), Delete: schema.DefaultTimeout(15 * time.Minute), }, - Schema: map[string]*schema.Schema{ "base_instance_name": { Type: schema.TypeString, @@ -267,7 +266,30 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, }, - + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, "wait_for_instances": { Type: schema.TypeBool, Optional: true, @@ -279,7 +301,8 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Optional: true, Default: "STABLE", ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, + + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, }, "stateful_disk": { Type: schema.TypeSet, @@ -333,6 +356,20 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + }, + }, + }, "stateful": { Type: schema.TypeList, Computed: true, @@ -423,6 +460,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), StatefulPolicy: expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()), // Force send TargetSize to allow a value of 0. 
ForceSendFields: []string{"TargetSize"}, @@ -630,6 +668,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err = d.Set("update_policy", flattenUpdatePolicy(manager.UpdatePolicy)); err != nil { return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", err.Error()) + } + } if err = d.Set("status", flattenStatus(manager.Status)); err != nil { return fmt.Errorf("Error setting status in state: %s", err.Error()) } @@ -695,6 +738,16 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte change = true } + if d.HasChange("all_instances_config") { + oldAic, newAic := d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + if d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()) change = true @@ -838,7 +891,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte func computeIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" conf := resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getManager, waitForUpdates, d, meta), Timeout: 
d.Timeout(schema.TimeoutCreate), @@ -1020,6 +1073,58 @@ func flattenUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdatePolicy) return results } +func expandAllInstancesConfig(old []interface{}, new []interface{}) *compute.InstanceGroupManagerAllInstancesConfig { + var properties *compute.InstancePropertiesPatch + for _, raw := range new { + properties = &compute.InstancePropertiesPatch{} + data := raw.(map[string]interface{}) + properties.Metadata = convertStringMap(data["metadata"].(map[string]interface{})) + if len(properties.Metadata) == 0 { + properties.NullFields = append(properties.NullFields, "Metadata") + } + properties.Labels = convertStringMap(data["labels"].(map[string]interface{})) + if len(properties.Labels) == 0 { + properties.NullFields = append(properties.NullFields, "Labels") + } + } + + if properties != nil { + for _, raw := range old { + data := raw.(map[string]interface{}) + for k := range data["metadata"].(map[string]interface{}) { + if _, exist := properties.Metadata[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) + } + } + for k := range data["labels"].(map[string]interface{}) { + if _, exist := properties.Labels[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) + } + } + } + } + if properties != nil { + allInstancesConfig := &compute.InstanceGroupManagerAllInstancesConfig{} + allInstancesConfig.Properties = properties + return allInstancesConfig + } else { + return nil + } +} + +func flattenAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + props := map[string]interface{}{} + if len(allInstancesConfig.Properties.Metadata) > 0 { + props["metadata"] = allInstancesConfig.Properties.Metadata + } + if len(allInstancesConfig.Properties.Labels) > 0 { + props["labels"] = allInstancesConfig.Properties.Labels + } + results = append(results, 
props) + return results +} + func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]interface{} { results := []map[string]interface{}{} data := map[string]interface{}{ @@ -1027,6 +1132,9 @@ func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]inte "stateful": flattenStatusStateful(status.Stateful), "version_target": flattenStatusVersionTarget(status.VersionTarget), } + if status.AllInstancesConfig != nil { + data["all_instances_config"] = flattenStatusAllInstancesConfig(status.AllInstancesConfig) + } results = append(results, data) return results } @@ -1059,6 +1167,15 @@ func flattenStatusVersionTarget(versionTarget *compute.InstanceGroupManagerStatu return results } +func flattenStatusAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerStatusAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "effective": allInstancesConfig.Effective, + } + results = append(results, data) + return results +} + func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { if err := d.Set("wait_for_instances", false); err != nil { return nil, fmt.Errorf("Error setting wait_for_instances: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager_test.go index 5e6f84acc6..4cef78efed 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager_test.go @@ -452,7 +452,7 @@ func testAccCheckInstanceGroupManagerDestroyProducer(t *testing.T) func(s *terra func testAccInstanceGroupManager_basic(template, 
target, igm1, igm2 string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -517,7 +517,7 @@ resource "google_compute_instance_group_manager" "igm-no-tp" { func testAccInstanceGroupManager_targetSizeZero(template, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -560,7 +560,7 @@ resource "google_compute_instance_group_manager" "igm-basic" { func testAccInstanceGroupManager_update(template, target, description, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -608,6 +608,14 @@ resource "google_compute_instance_group_manager" "igm-update" { name = "customhttp" port = 8080 } + all_instances_config { + metadata = { + foo = "bar" + } + labels = { + doo = "dad" + } + } } `, template, target, description, igm) } @@ -616,7 +624,7 @@ resource "google_compute_instance_group_manager" "igm-update" { func testAccInstanceGroupManager_update2(template1, target1, target2, template2, description, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -698,6 +706,15 @@ resource "google_compute_instance_group_manager" "igm-update" { name = "customhttps" port = 8443 } + + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } } `, template1, target1, target2, template2, description, igm) } @@ -706,7 +723,7 @@ resource "google_compute_instance_group_manager" "igm-update" { func testAccInstanceGroupManager_update3(template1, target1, target2, template2, description2, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -791,7 +808,7 @@ 
resource "google_compute_instance_group_manager" "igm-update" { func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -842,7 +859,7 @@ resource "google_compute_instance_group_manager" "igm-update" { func testAccInstanceGroupManager_rollingUpdatePolicy(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -897,7 +914,7 @@ resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { func testAccInstanceGroupManager_rollingUpdatePolicy2(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -949,7 +966,7 @@ resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { func testAccInstanceGroupManager_rollingUpdatePolicy3(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -998,7 +1015,7 @@ resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { func testAccInstanceGroupManager_rollingUpdatePolicy4(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1046,7 +1063,7 @@ resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { func testAccInstanceGroupManager_rollingUpdatePolicy5(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1095,7 +1112,7 @@ resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string { return fmt.Sprintf(` 
data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1152,7 +1169,7 @@ resource "google_compute_instance_group_manager" "igm-basic-2" { func testAccInstanceGroupManager_autoHealingPolicies(template, target, igm, hck string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1210,7 +1227,7 @@ resource "google_compute_http_health_check" "zero" { func testAccInstanceGroupManager_autoHealingPoliciesRemoved(template, target, igm, hck string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1264,7 +1281,7 @@ resource "google_compute_http_health_check" "zero" { func testAccInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1332,7 +1349,7 @@ resource "google_compute_instance_group_manager" "igm-basic" { func testAccInstanceGroupManager_stateful(template, target, igm, hck string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1404,7 +1421,7 @@ resource "google_compute_http_health_check" "zero" { func testAccInstanceGroupManager_statefulUpdated(template, target, igm, hck string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1481,7 +1498,7 @@ resource "google_compute_http_health_check" "zero" { func testAccInstanceGroupManager_waitForStatus(template, target, igm, perInstanceConfig string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1543,7 
+1560,7 @@ resource "google_compute_per_instance_config" "per-instance" { func testAccInstanceGroupManager_waitForStatusUpdated(template, target, igm, perInstanceConfig string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1579,11 +1596,26 @@ resource "google_compute_instance_group_manager" "igm-basic" { name = "%s" version { instance_template = google_compute_instance_template.igm-basic.self_link - name = "prod" + name = "prod2" } target_pools = [google_compute_target_pool.igm-basic.self_link] base_instance_name = "tf-test-igm-basic" zone = "us-central1-c" + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + replacement_method = "RECREATE" + max_surge_fixed = 0 + max_unavailable_percent = 50 + } + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } wait_for_instances = true wait_for_instances_status = "UPDATED" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_test.go index 608c11b274..140c4f63ca 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_test.go @@ -321,7 +321,7 @@ func testAccComputeInstanceGroup_hasCorrectNetwork(t *testing.T, nInstanceGroup func testAccComputeInstanceGroup_basic(zone, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -376,7 +376,7 @@ resource "google_compute_instance_group" "empty" { func testAccComputeInstanceGroup_rename(instance, instanceGroup, backend, health string) string { return 
fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -439,7 +439,7 @@ resource "google_compute_https_health_check" "healthcheck" { func testAccComputeInstanceGroup_update(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -482,7 +482,7 @@ resource "google_compute_instance_group" "update" { func testAccComputeInstanceGroup_update2(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -525,7 +525,7 @@ resource "google_compute_instance_group" "update" { func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -583,7 +583,7 @@ resource "google_compute_instance_group" "group" { func testAccComputeInstanceGroup_network(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_iam_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_iam_test.go index 97cea31bb2..a10b6d47ee 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_iam_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_iam_test.go @@ -62,7 +62,7 @@ func testAccComputeInstanceIamPolicy_basic(zone, instanceName, roleId string) st boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_migrate_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_migrate_test.go index f91dd9cf2b..7e54c8e689 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_migrate_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_migrate_test.go @@ -18,7 +18,7 @@ func TestAccComputeInstanceMigrateState(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } cases := map[string]struct { StateVersion int @@ -92,7 +92,7 @@ func TestAccComputeInstanceMigrateState(t *testing.T) { Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, }, @@ -122,7 +122,7 @@ func TestAccComputeInstanceMigrateState_empty(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } var is *terraform.InstanceState var meta interface{} @@ -150,7 +150,7 @@ func TestAccComputeInstanceMigrateState_bootDisk(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -164,7 +164,7 @@ func TestAccComputeInstanceMigrateState_bootDisk(t *testing.T) { Boot: true, AutoDelete: true, InitializeParams: 
&compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, }, @@ -218,7 +218,7 @@ func TestAccComputeInstanceMigrateState_v4FixBootDisk(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -232,7 +232,7 @@ func TestAccComputeInstanceMigrateState_v4FixBootDisk(t *testing.T) { Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, }, @@ -285,7 +285,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -294,7 +294,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) { diskName := fmt.Sprintf("instance-test-%s", randString(t, 10)) disk := &compute.Disk{ Name: diskName, - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", Zone: zone, } op, err := config.NewComputeClient(config.userAgent).Disks.Insert(config.Project, zone, disk).Do() @@ -315,7 +315,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) { Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: 
"projects/debian-cloud/global/images/family/debian-11", }, }, { @@ -366,7 +366,7 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource(t *testing.T t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -375,7 +375,7 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource(t *testing.T diskName := fmt.Sprintf("instance-test-%s", randString(t, 10)) disk := &compute.Disk{ Name: diskName, - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", Zone: zone, } op, err := config.NewComputeClient(config.userAgent).Disks.Insert(config.Project, zone, disk).Do() @@ -396,7 +396,7 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource(t *testing.T Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { @@ -446,7 +446,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -459,13 +459,13 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - 
SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, DiskEncryptionKey: &compute.CustomerEncryptionKey{ RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", @@ -492,7 +492,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing attributes := map[string]string{ "boot_disk.#": "1", "disk.#": "1", - "disk.0.image": "projects/debian-cloud/global/images/family/debian-9", + "disk.0.image": "projects/debian-cloud/global/images/family/debian-11", "disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", "disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=", "zone": zone, @@ -515,7 +515,7 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromEncryptionKey(t *te t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -528,13 +528,13 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromEncryptionKey(t *te Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, DiskEncryptionKey: &compute.CustomerEncryptionKey{ RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", @@ -561,7 +561,7 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromEncryptionKey(t *te attributes := map[string]string{ "boot_disk.#": "1", "disk.#": "1", - "disk.0.image": 
"projects/debian-cloud/global/images/family/debian-9", + "disk.0.image": "projects/debian-cloud/global/images/family/debian-11", "disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", "disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=", "zone": zone, @@ -583,7 +583,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *te t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -596,19 +596,19 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *te Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/debian-9-stretch-v20180814", + SourceImage: "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", }, }, }, @@ -632,9 +632,9 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *te attributes := map[string]string{ "boot_disk.#": "1", "disk.#": "2", - "disk.0.image": "projects/debian-cloud/global/images/debian-9-stretch-v20180814", + "disk.0.image": "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", "disk.0.auto_delete": "true", - "disk.1.image": "global/images/family/debian-9", + "disk.1.image": "global/images/family/debian-11", "disk.1.auto_delete": "true", "zone": zone, 
} @@ -656,7 +656,7 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromAutoDeleteAndImage( t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -669,19 +669,19 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromAutoDeleteAndImage( Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/debian-9-stretch-v20180814", + SourceImage: "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", }, }, }, @@ -705,9 +705,9 @@ func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromAutoDeleteAndImage( attributes := map[string]string{ "boot_disk.#": "1", "disk.#": "2", - "disk.0.image": "projects/debian-cloud/global/images/debian-9-stretch-v20180814", + "disk.0.image": "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", "disk.0.auto_delete": "true", - "disk.1.image": "global/images/family/debian-9", + "disk.1.image": "global/images/family/debian-11", "disk.1.auto_delete": "true", "zone": zone, } @@ -728,7 +728,7 @@ func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := 
getInitializedConfig(t) zone := "us-central1-f" @@ -742,7 +742,7 @@ func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) { Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { @@ -794,7 +794,7 @@ func TestAccComputeInstanceMigrateState_v4FixScratchDisk(t *testing.T) { t.Parallel() if os.Getenv(TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", TestEnvVar)) + t.Skipf("Network access not allowed; use %s=1 to enable", TestEnvVar) } config := getInitializedConfig(t) zone := "us-central1-f" @@ -808,7 +808,7 @@ func TestAccComputeInstanceMigrateState_v4FixScratchDisk(t *testing.T) { Boot: true, AutoDelete: true, InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: "projects/debian-cloud/global/images/family/debian-9", + SourceImage: "projects/debian-cloud/global/images/family/debian-11", }, }, { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go index 62552b3fbc..0d15ea0f00 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go @@ -24,6 +24,7 @@ var ( "scheduling.0.node_affinities", "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", + "scheduling.0.instance_termination_action", } shieldedInstanceTemplateConfigKeys = []string{ @@ -66,7 +67,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateGCPName, + ValidateFunc: 
validateGCEName, Description: `The name of the instance template. If you leave this blank, Terraform will auto-generate a unique name.`, }, @@ -535,6 +536,13 @@ func resourceComputeInstanceTemplate() *schema.Resource { AtLeastOneOf: schedulingInstTemplateKeys, Description: `Whether the instance is spot. If this is set as SPOT.`, }, + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, }, }, }, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template_test.go index 7361e8bf81..066ab88374 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template_test.go @@ -1,4 +1,3 @@ -// package google import ( @@ -1117,6 +1116,7 @@ func TestAccComputeInstanceTemplate_spot(t *testing.T) { testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, "STOP"), ), }, { @@ -1284,6 +1284,15 @@ func testAccCheckComputeInstanceTemplateProvisioningModel(instanceTemplate *comp } } +func testAccCheckComputeInstanceTemplateInstanceTerminationAction(instanceTemplate *compute.InstanceTemplate, instance_termination_action string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.InstanceTerminationAction != instance_termination_action { + return 
fmt.Errorf("Expected instance_termination_action %v, got %v", instance_termination_action, instanceTemplate.Properties.Scheduling.InstanceTerminationAction) + } + return nil + } +} + func testAccCheckComputeInstanceTemplateAutomaticRestart(instanceTemplate *compute.InstanceTemplate, automaticRestart bool) resource.TestCheckFunc { return func(s *terraform.State) error { ar := instanceTemplate.Properties.Scheduling.AutomaticRestart @@ -1492,7 +1501,7 @@ func testAccCheckComputeInstanceTemplateHasDiskResourcePolicy(instanceTemplate * func testAccComputeInstanceTemplate_basic(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1589,7 +1598,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_preemptible(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1632,7 +1641,7 @@ resource "google_compute_address" "foo" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1666,7 +1675,7 @@ resource "google_compute_address" "foo" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1715,7 +1724,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_networkTier(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1740,7 +1749,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_networkIP(suffix, networkIP string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1768,7 +1777,7 @@ resource 
"google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_networkIPAddress(suffix, ipAddress string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1796,7 +1805,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_disks(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1842,7 +1851,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_disksInvalid(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1914,7 +1923,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_regionDisks(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1957,7 +1966,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_subnet_auto(network, suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2003,7 +2012,7 @@ resource "google_compute_subnetwork" "subnetwork" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2080,7 +2089,7 @@ resource "google_compute_subnetwork" "subnetwork" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2112,7 +2121,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_startup_script(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - 
family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2143,7 +2152,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_primaryAliasIpRange(i string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2190,7 +2199,7 @@ resource "google_compute_subnetwork" "inst-test-subnetwork" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2229,7 +2238,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_guestAccelerator(i string, count uint8) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2264,7 +2273,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_minCpuPlatform(i string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2296,7 +2305,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_encryptionKMS(suffix, kmsLink string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2330,7 +2339,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_soleTenantInstanceTemplate(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2370,7 +2379,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, consumeReservationType string) string { return fmt.Sprintf(` data "google_compute_image" 
"my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2399,7 +2408,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_specificReservation(templateName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2591,7 +2600,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_imageResourceTest(diskName string, imageName string, imageDescription string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2624,7 +2633,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_resourcePolicies(suffix string, policyName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { @@ -2722,7 +2731,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_queueCount(instanceTemplateName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2749,7 +2758,7 @@ data "google_compute_default_service_account" "default" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2826,7 +2835,7 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_spot(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -2850,6 +2859,7 @@ resource "google_compute_instance_template" "foobar" { preemptible = true automatic_restart = 
false provisioning_model = "SPOT" + instance_termination_action = "STOP" } metadata = { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_test.go index 70eef1461d..20a0679fcf 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_test.go @@ -263,6 +263,7 @@ func TestAccComputeInstance_IPv6(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceIpv6AccessConfigHasExternalIPv6(&instance), ), }, { @@ -2226,6 +2227,7 @@ func TestAccComputeInstance_spotVM(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, "STOP"), ), }, computeInstanceImportStep("us-central1-a", instanceName, []string{}), @@ -2538,6 +2540,20 @@ func testAccCheckComputeInstanceAccessConfigHasNatIP(instance *compute.Instance) } } +func testAccCheckComputeInstanceIpv6AccessConfigHasExternalIPv6(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + for _, c := range i.Ipv6AccessConfigs { + if c.ExternalIpv6 == "" { + return fmt.Errorf("no External IPv6") + } + } + } + + return nil + } +} + func testAccCheckComputeInstanceAccessConfigHasPTR(instance *compute.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { for _, i := range instance.NetworkInterfaces { @@ -2567,6 +2583,23 @@ func testAccCheckComputeResourcePolicy(instance *compute.Instance, scheduleName } } +func 
testAccCheckComputeInstanceTerminationAction(instance *compute.Instance, instanceTerminationActionWant string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance == nil { + return fmt.Errorf("instance is nil") + } + if instance.Scheduling == nil { + return fmt.Errorf("no scheduling") + } + + if instance.Scheduling.InstanceTerminationAction != instanceTerminationActionWant { + return fmt.Errorf("got the wrong instance termniation action: have: %s; want: %s", instance.Scheduling.InstanceTerminationAction, instanceTerminationActionWant) + } + + return nil + } +} + func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, delete bool, boot bool) resource.TestCheckFunc { return func(s *terraform.State) error { if instance.Disks == nil { @@ -3057,7 +3090,7 @@ func testAccCheckComputeInstanceHasStatus(instance *compute.Instance, status str func testAccComputeInstance_basic(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3097,7 +3130,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_basic2(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3128,7 +3161,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_basic3(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3159,7 +3192,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_basic4(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3190,7 +3223,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_basic5(instance string) 
string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3221,7 +3254,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_basic_deletionProtectionFalse(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3249,7 +3282,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_basic_deletionProtectionTrue(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3279,7 +3312,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3312,7 +3345,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_update(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3350,7 +3383,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_ip(ip, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3387,7 +3420,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_ipv6(ip, instance, record string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3443,7 +3476,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_PTRRecord(record, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - 
family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3476,7 +3509,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_networkTier(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3509,7 +3542,7 @@ func testAccComputeInstance_disks_encryption(bootEncryptionKey string, diskNameT sort.Strings(diskNames) return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3609,7 +3642,7 @@ func testAccComputeInstance_disks_encryption_restart(bootEncryptionKey string, d } return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3663,7 +3696,7 @@ func testAccComputeInstance_disks_encryption_restartUpdate(bootEncryptionKey str } return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3722,7 +3755,7 @@ data "google_project" "project" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3829,7 +3862,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_instanceSchedule(instance, schedule string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3868,7 +3901,7 @@ resource "google_compute_resource_policy" "instance_schedule" { func testAccComputeInstance_addResourcePolicy(instance, schedule string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3909,7 +3942,7 @@ resource "google_compute_resource_policy" "instance_schedule" { func testAccComputeInstance_updateResourcePolicy(instance, schedule1, schedule2 
string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -3964,7 +3997,7 @@ resource "google_compute_resource_policy" "instance_schedule2" { func testAccComputeInstance_removeResourcePolicy(instance, schedule1, schedule2 string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4019,7 +4052,7 @@ resource "google_compute_resource_policy" "instance_schedule2" { func testAccComputeInstance_attachedDisk(disk, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4055,7 +4088,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_attachedDisk_sourceUrl(disk, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4091,7 +4124,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_attachedDisk_modeRo(disk, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4128,7 +4161,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_addAttachedDisk(disk, disk2, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4175,7 +4208,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_detachDisk(disk, disk2, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4218,7 +4251,7 @@ resource "google_compute_instance" "foobar" { func 
testAccComputeInstance_updateAttachedDiskEncryptionKey(disk, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4258,7 +4291,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_bootDisk_source(disk, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4287,7 +4320,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_bootDisk_sourceUrl(disk, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4316,7 +4349,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_bootDisk_type(instance string, diskType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4342,7 +4375,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_bootDisk_mode(instance string, diskMode string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4370,7 +4403,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_scratchDisk(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4403,7 +4436,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_serviceAccount(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4436,7 +4469,7 @@ resource "google_compute_instance" "foobar" { func 
testAccComputeInstance_serviceAccount_update0(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4462,7 +4495,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_serviceAccount_update01(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4495,7 +4528,7 @@ data "google_compute_default_service_account" "default" { func testAccComputeInstance_serviceAccount_update02(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4529,7 +4562,7 @@ data "google_compute_default_service_account" "default" { func testAccComputeInstance_serviceAccount_update3(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4564,7 +4597,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_serviceAccount_update4(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } resource "google_compute_instance" "foobar" { @@ -4592,7 +4625,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_scheduling(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4621,7 +4654,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_schedulingUpdated(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4709,7 +4742,7 @@ resource "google_compute_instance" 
"foobar" { func testAccComputeInstance_subnet_auto(suffix, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4742,7 +4775,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_subnet_custom(suffix, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4782,7 +4815,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_subnet_xpn(org, billingId, projectName, instance, suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4859,7 +4892,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_networkIPAuto(suffix, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4897,7 +4930,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_network_ip_custom(suffix, instance, ipAddress string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4936,7 +4969,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_private_image_family(disk, family, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -4977,7 +5010,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_networkPerformanceConfig(disk string, image string, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5026,7 +5059,7 @@ 
resource "google_compute_instance" "foobar" { func testAccComputeInstance_multiNic(instance, network, subnetwork string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5123,7 +5156,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_guestAccelerator(instance string, count uint8) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5158,7 +5191,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_minCpuPlatform(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5185,7 +5218,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_primaryAliasIpRange(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5214,7 +5247,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_secondaryAliasIpRange(network, subnet, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5268,7 +5301,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_secondaryAliasIpRangeUpdate(network, subnet, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5315,7 +5348,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_hostname(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5344,7 +5377,7 @@ resource 
"google_compute_instance" "foobar" { func testAccComputeInstance_stopInstanceToUpdate(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5381,7 +5414,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_stopInstanceToUpdate2(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5417,7 +5450,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_stopInstanceToUpdate3(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5444,7 +5477,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_withoutNodeAffinities(instance, nodeTemplate, nodeGroup string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5491,7 +5524,7 @@ resource "google_compute_node_group" "nodes" { func testAccComputeInstance_soleTenantNodeAffinities(instance, nodeTemplate, nodeGroup string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5560,7 +5593,7 @@ resource "google_compute_node_group" "nodes" { func testAccComputeInstance_soleTenantNodeAffinitiesUpdated(instance, nodeTemplate, nodeGroup string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5629,7 +5662,7 @@ resource "google_compute_node_group" "nodes" { func testAccComputeInstance_soleTenantNodeAffinitiesReduced(instance, nodeTemplate, nodeGroup string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family 
= "debian-11" project = "debian-cloud" } @@ -5692,7 +5725,7 @@ resource "google_compute_node_group" "nodes" { func testAccComputeInstance_reservationAffinity_nonSpecificReservationConfig(instanceName, reservationType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5720,7 +5753,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_reservationAffinity_specificReservationConfig(instanceName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5902,7 +5935,7 @@ func testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate( return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5937,7 +5970,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_desiredStatusTerminatedUpdate(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -5974,7 +6007,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_resourcePolicyCollocate(instance, suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -6049,7 +6082,7 @@ resource "google_compute_resource_policy" "foo" { func testAccComputeInstance_subnetworkUpdate(suffix, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -6127,7 +6160,7 @@ func testAccComputeInstance_subnetworkUpdate(suffix, instance string) string { func testAccComputeInstance_subnetworkUpdateTwo(suffix, instance string) string { return fmt.Sprintf(` data "google_compute_image" 
"my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -6201,7 +6234,7 @@ func testAccComputeInstance_subnetworkUpdateTwo(suffix, instance string) string func testAccComputeInstance_queueCountSet(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -6251,6 +6284,7 @@ resource "google_compute_instance" "foobar" { provisioning_model = "SPOT" automatic_restart = false preemptible = true + instance_termination_action = "STOP" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_interconnect_attachment_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_interconnect_attachment_generated_test.go index e298b52f37..7ca9019df3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_interconnect_attachment_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_interconnect_attachment_generated_test.go @@ -59,7 +59,7 @@ resource "google_compute_interconnect_attachment" "on_prem" { } resource "google_compute_router" "foobar" { - name = "router%{random_suffix}" + name = "tf-test-router-1%{random_suffix}" network = google_compute_network.foobar.name bgp { asn = 16550 @@ -67,7 +67,7 @@ resource "google_compute_router" "foobar" { } resource "google_compute_network" "foobar" { - name = "network%{random_suffix}" + name = "tf-test-network-1%{random_suffix}" auto_create_subnetworks = false } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_machine_image_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_machine_image_generated_test.go index e707be7598..8f0ec5c0a2 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_machine_image_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_machine_image_generated_test.go @@ -57,7 +57,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -108,7 +108,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go index 4085a8b76f..b1fb18035d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go @@ -43,9 +43,10 @@ func resourceComputeNetwork() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go index e8ebd5bfd7..da00fa666e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go @@ -54,12 +54,6 @@ range).`, DiffSuppressFunc: compareResourceNames, Description: `The network endpoint group this endpoint is part of.`, }, - "port": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Port number of network endpoint.`, - }, "instance": { Type: schema.TypeString, Optional: true, @@ -69,6 +63,12 @@ range).`, This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.`, }, + "port": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Port number of network endpoint.`, + }, "zone": { Type: schema.TypeString, Computed: true, @@ -285,7 +285,9 @@ func resourceComputeNetworkEndpointDelete(d *schema.ResourceData, meta interface if err != nil { return err } - toDelete["port"] = portProp + if portProp != 0 { + toDelete["port"] = portProp + } ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) if err != nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go index 12a8531ec6..f18d54c037 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go @@ -43,7 +43,7 @@ func resourceComputeNetworkEndpointGroup() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match @@ -78,14 +78,16 @@ you create the resource.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", ""}), + ValidateFunc: validateEnum([]string{"GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", ""}), Description: `Type of network endpoints in this network endpoint group. NON_GCP_PRIVATE_IP_PORT is used for hybrid connectivity network endpoint groups (see https://cloud.google.com/load-balancing/docs/hybrid). Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or -CONNECTION balancing modes. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]`, +CONNECTION balancing modes. + +Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. 
Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]`, Default: "GCE_VM_IP_PORT", }, "subnetwork": { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group_test.go index 6e1510bcdf..bae87c9fa9 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group_test.go @@ -31,6 +31,31 @@ func TestAccComputeNetworkEndpointGroup_networkEndpointGroup(t *testing.T) { }) } +func TestAccComputeNetworkEndpointGroup_internalEndpoint(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkEndpointGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkEndpointGroup_internalEndpoint(context), + }, + { + ResourceName: "google_compute_network_endpoint_group.neg", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "zone"}, + }, + }, + }) +} + func testAccComputeNetworkEndpointGroup_networkEndpointGroup(context map[string]interface{}) string { return Nprintf(` resource "google_compute_network_endpoint_group" "neg" { @@ -46,3 +71,53 @@ resource "google_compute_network" "default" { } `, context) } + +func testAccComputeNetworkEndpointGroup_internalEndpoint(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_network_endpoint_group" "neg" { + name = "tf-test-my-lb-neg%{random_suffix}" + network = 
google_compute_network.internal.id + subnetwork = google_compute_subnetwork.internal.id + zone = "us-central1-a" + network_endpoint_type = "GCE_VM_IP" +} + +resource "google_compute_network_endpoint" "endpoint" { + network_endpoint_group = google_compute_network_endpoint_group.neg.name + #ip_address = "127.0.0.1" + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip +} + +resource "google_compute_network" "internal" { + name = "tf-test-neg-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "internal"{ + name = "tf-test-my-subnetwork%{random_suffix}" + network = google_compute_network.internal.id + ip_cidr_range = "10.128.0.0/20" + region = "us-central1" + private_ip_google_access= true +} + +resource "google_compute_instance" "default" { + name = "tf-test-neg-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = "debian-8-jessie-v20160803" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.internal.self_link + access_config { + } + } +} + +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_test.go index 33bcb0b6cc..69a6826bae 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_test.go @@ -175,7 +175,7 @@ resource "google_compute_instance" "default" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go index 1efade4321..a5707c11cc 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go @@ -36,7 +36,7 @@ func resourceComputeNetworkPeering() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the peering.`, }, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go index 57d8832cfb..d9954e162d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go @@ -114,7 +114,7 @@ set to true.`, "name": { Type: schema.TypeString, Required: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the packet mirroring rule`, }, "network": { @@ -169,10 +169,9 @@ destination (egress) IP in the IP header. Only IPv4 is supported.`, "ip_protocols": { Type: schema.TypeList, Optional: true, - Description: `Protocols that apply as a filter on mirrored traffic. 
Possible values: ["tcp", "udp", "icmp"]`, + Description: `Possible IP protocols including tcp, udp, icmp and esp`, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"tcp", "udp", "icmp"}), + Type: schema.TypeString, }, }, }, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring_generated_test.go index c98554054f..410b881f5c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring_generated_test.go @@ -55,7 +55,7 @@ resource "google_compute_instance" "mirror" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_per_instance_config_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_per_instance_config_test.go index 37c37cc514..8e9967cb77 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_per_instance_config_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_per_instance_config_test.go @@ -238,7 +238,7 @@ resource "google_compute_disk" "disk1" { name = "test-disk2-%{random_suffix}" type = "pd-ssd" zone = google_compute_instance_group_manager.igm.zone - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" physical_block_size_bytes = 4096 } @@ -255,7 +255,7 @@ resource "google_compute_disk" "disk2" { func testAccComputePerInstanceConfig_igm(context map[string]interface{}) string { return Nprintf(` data 
"google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go index f495759851..2d25c68982 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go @@ -379,7 +379,7 @@ to include directives regarding slower scale down, as described above.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. The name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_generated_test.go index 9b292ebfaf..91955259e3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_generated_test.go @@ -71,7 +71,7 @@ resource "google_compute_instance_template" "foobar" { machine_type = "e2-standard-4" disk { - source_image = "debian-cloud/debian-9" + source_image = "debian-cloud/debian-11" disk_size_gb = 250 } @@ -116,7 +116,7 @@ resource "google_compute_region_instance_group_manager" "foobar" { } data "google_compute_image" "debian_9" { - family = 
"debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_test.go index 37ab08710f..f8453c5ffb 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler_test.go @@ -116,7 +116,7 @@ func TestAccComputeRegionAutoscaler_scaleInControl(t *testing.T) { func testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go index 72cc546d45..9c821575be 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go @@ -41,7 +41,7 @@ var backendServiceOnlyManagedFieldNames = []string{ // validateManagedBackendServiceBackends ensures capacity_scaler is set for each backend in a managed // backend service. To prevent a permadiff, we decided to override the API behavior and require the -//// capacity_scaler value in this case. +// capacity_scaler value in this case. 
// // The API: // - requires the sum of the backends' capacity_scalers be > 0 @@ -1017,8 +1017,11 @@ partial URL.`, Type: schema.TypeString, Optional: true, ValidateFunc: validateEnum([]string{"UTILIZATION", "RATE", "CONNECTION", ""}), - Description: `Specifies the balancing mode for this backend. Default value: "CONNECTION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, - Default: "CONNECTION", + Description: `Specifies the balancing mode for this backend. + +See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) +for an explanation of load balancing modes. Default value: "CONNECTION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, + Default: "CONNECTION", }, "capacity_scaler": { Type: schema.TypeFloat, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_generated_test.go index e49583e533..906ffb8b35 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_generated_test.go @@ -330,7 +330,7 @@ resource "google_compute_region_backend_service" "default" { } data "google_compute_image" "debian_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_test.go index b4a3ad46ac..5dbeb5a3f1 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service_test.go @@ -464,7 +464,7 @@ resource "google_compute_instance_group" "group" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -546,7 +546,7 @@ func testAccComputeRegionBackendService_withBackend( serviceName, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -613,7 +613,7 @@ func testAccComputeRegionBackendService_withBackendMultiNic( serviceName, net1Name, net2Name, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -712,7 +712,7 @@ func testAccComputeRegionBackendService_withInvalidInternalBackend( serviceName, igName, itName, checkName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -795,7 +795,7 @@ resource "google_compute_region_backend_service" "default" { } data "google_compute_image" "debian_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_generated_test.go index a2cb353689..6c76cfafbb 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_generated_test.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_generated_test.go @@ -62,7 +62,7 @@ resource "google_compute_region_disk" "regiondisk" { resource "google_compute_disk" "disk" { name = "tf-test-my-disk%{random_suffix}" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_resource_policy_attachment_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_resource_policy_attachment_generated_test.go index d65ce7da7d..8ec7d5499c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_resource_policy_attachment_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_resource_policy_attachment_generated_test.go @@ -58,7 +58,7 @@ resource "google_compute_region_disk_resource_policy_attachment" "attachment" { resource "google_compute_disk" "disk" { name = "tf-test-my-base-disk%{random_suffix}" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -93,7 +93,7 @@ resource "google_compute_resource_policy" "policy" { } data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_test.go index 9b68fc5d23..a8f6f56ee3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_test.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_disk_test.go @@ -279,7 +279,7 @@ func testAccComputeRegionDisk_basic(diskName, refSelector string) string { return fmt.Sprintf(` resource "google_compute_disk" "disk" { name = "%s" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -304,7 +304,7 @@ func testAccComputeRegionDisk_basicUpdated(diskName, refSelector string) string return fmt.Sprintf(` resource "google_compute_disk" "disk" { name = "%s" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -337,7 +337,7 @@ func testAccComputeRegionDisk_encryption(diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "disk" { name = "%s" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -368,7 +368,7 @@ func testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskNam return fmt.Sprintf(` resource "google_compute_disk" "disk" { name = "%s" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" size = 50 type = "pd-ssd" zone = "us-central1-a" @@ -395,7 +395,7 @@ resource "google_compute_instance" "inst" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go index c61ab6229e..365eec2797 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go 
@@ -180,7 +180,8 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { Optional: true, Default: "STABLE", ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, + + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, }, "auto_healing_policies": { @@ -312,7 +313,30 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, }, - + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, "stateful_disk": { Type: schema.TypeSet, Optional: true, @@ -361,6 +385,20 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + }, + }, + }, "stateful": { Type: schema.TypeList, Computed: true, @@ -424,6 +462,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), DistributionPolicy: expandDistributionPolicy(d), StatefulPolicy: expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()), // Force send TargetSize to allow size of 0. 
@@ -461,7 +500,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met func computeRIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" conf := resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getRegionalManager, waitForUpdates, d, meta), Timeout: d.Timeout(schema.TimeoutCreate), @@ -520,6 +559,12 @@ func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, if !m.Status.VersionTarget.IsReached { return false, "reaching version target", nil } + if !m.Status.VersionTarget.IsReached { + return false, "reaching version target", nil + } + if !m.Status.AllInstancesConfig.Effective { + return false, "updating all instances config", nil + } } return true, "created", nil } else { @@ -595,6 +640,11 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil { return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", err.Error()) + } + } if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { return fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) } @@ -657,6 +707,16 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met change = true } + if d.HasChange("all_instances_config") { + oldAic, newAic := 
d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + if d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()) change = true diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager_test.go index e6210aa1aa..d17e31f0a8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager_test.go @@ -422,7 +422,7 @@ func testAccCheckRegionInstanceGroupManagerDestroyProducer(t *testing.T) func(s func testAccRegionInstanceGroupManager_basic(template, target, igm1, igm2 string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -486,7 +486,7 @@ resource "google_compute_region_instance_group_manager" "igm-no-tp" { func testAccRegionInstanceGroupManager_targetSizeZero(template, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -529,7 +529,7 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { func testAccRegionInstanceGroupManager_update(template, target, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = 
"debian-cloud" } @@ -577,6 +577,15 @@ resource "google_compute_region_instance_group_manager" "igm-update" { name = "customhttp" port = 8080 } + + all_instances_config { + metadata = { + foo = "bar" + } + labels = { + doo = "dad" + } + } } `, template, target, igm) } @@ -585,7 +594,7 @@ resource "google_compute_region_instance_group_manager" "igm-update" { func testAccRegionInstanceGroupManager_update2(template1, target1, target2, template2, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -667,6 +676,15 @@ resource "google_compute_region_instance_group_manager" "igm-update" { name = "customhttps" port = 8443 } + + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } } `, template1, target1, target2, template2, igm) } @@ -675,7 +693,7 @@ resource "google_compute_region_instance_group_manager" "igm-update" { func testAccRegionInstanceGroupManager_update3(template1, target1, target2, template2, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -760,7 +778,7 @@ resource "google_compute_region_instance_group_manager" "igm-update" { func testAccRegionInstanceGroupManager_updateLifecycle(tag, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -811,7 +829,7 @@ resource "google_compute_region_instance_group_manager" "igm-update" { func testAccRegionInstanceGroupManager_separateRegions(igm1, igm2 string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -868,7 +886,7 @@ resource "google_compute_region_instance_group_manager" "igm-basic-2" { func testAccRegionInstanceGroupManager_autoHealingPolicies(template, target, igm, hck 
string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -926,7 +944,7 @@ resource "google_compute_http_health_check" "zero" { func testAccRegionInstanceGroupManager_autoHealingPoliciesRemoved(template, target, igm, hck string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -980,7 +998,7 @@ resource "google_compute_http_health_check" "zero" { func testAccRegionInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1048,7 +1066,7 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { func testAccRegionInstanceGroupManager_distributionPolicy(template, igm string, zones []string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1088,7 +1106,7 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { func testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1146,7 +1164,7 @@ resource "google_compute_region_instance_group_manager" "igm-rolling-update-poli func testAccRegionInstanceGroupManager_rollingUpdatePolicySetToDefault(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1205,7 +1223,7 @@ resource "google_compute_region_instance_group_manager" "igm-rolling-update-poli func testAccRegionInstanceGroupManager_rollingUpdatePolicy2(igm string) string { return fmt.Sprintf(` data "google_compute_image" 
"my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1258,7 +1276,7 @@ resource "google_compute_region_instance_group_manager" "igm-rolling-update-poli func testAccRegionInstanceGroupManager_rollingUpdatePolicy3(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1314,7 +1332,7 @@ resource "google_compute_region_instance_group_manager" "igm-rolling-update-poli func testAccRegionInstanceGroupManager_stateful(template, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -1369,7 +1387,7 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { func testAccRegionInstanceGroupManager_statefulUpdate(template, igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go index b3e7a70829..3ac25f3bf6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go @@ -43,7 +43,7 @@ func resourceComputeRegionNetworkEndpointGroup() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match @@ -179,7 +179,7 @@ and { service="bar2", tag="foo2" } respectively.`, }, }, }, - ConflictsWith: []string{"app_engine", "cloud_function", "serverless_deployment"}, + ConflictsWith: []string{"cloud_function", "app_engine", "serverless_deployment"}, }, "description": { Type: schema.TypeString, @@ -187,6 +187,15 @@ and { service="bar2", tag="foo2" } respectively.`, ForceNew: true, Description: `An optional description of this resource. Provide this property when you create the resource.`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `This field is only used for PSC. +The URL of the network to which all network endpoints in the NEG belong. Uses +"default" project network if unspecified.`, }, "network_endpoint_type": { Type: schema.TypeString, @@ -245,7 +254,15 @@ API Gateway: Unused, App Engine: The service version, Cloud Functions: Unused, C }, }, }, - ConflictsWith: []string{"cloud_run", "app_engine", "cloud_function"}, + ConflictsWith: []string{"cloud_run", "cloud_function", "app_engine"}, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `This field is only used for PSC. 
+Optional URL of the subnetwork to which all network endpoints in the NEG belong.`, }, "project": { Type: schema.TypeString, @@ -294,6 +311,18 @@ func resourceComputeRegionNetworkEndpointGroupCreate(d *schema.ResourceData, met } else if v, ok := d.GetOkExists("psc_target_service"); !isEmptyValue(reflect.ValueOf(pscTargetServiceProp)) && (ok || !reflect.DeepEqual(v, pscTargetServiceProp)) { obj["pscTargetService"] = pscTargetServiceProp } + networkProp, err := expandComputeRegionNetworkEndpointGroupNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + subnetworkProp, err := expandComputeRegionNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } cloudRunProp, err := expandComputeRegionNetworkEndpointGroupCloudRun(d.Get("cloud_run"), d, config) if err != nil { return err @@ -417,6 +446,12 @@ func resourceComputeRegionNetworkEndpointGroupRead(d *schema.ResourceData, meta if err := d.Set("psc_target_service", flattenComputeRegionNetworkEndpointGroupPscTargetService(res["pscTargetService"], d, config)); err != nil { return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) } + if err := d.Set("network", flattenComputeRegionNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("subnetwork", flattenComputeRegionNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } if err := d.Set("cloud_run", 
flattenComputeRegionNetworkEndpointGroupCloudRun(res["cloudRun"], d, config)); err != nil { return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) } @@ -521,6 +556,20 @@ func flattenComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d * return v } +func flattenComputeRegionNetworkEndpointGroupNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + return ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + return ConvertSelfLinkToV1(v.(string)) +} + func flattenComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -654,6 +703,22 @@ func expandComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d Te return v, nil } +func expandComputeRegionNetworkEndpointGroupNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) + } + return f.RelativeLink(), nil +} + func expandComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group_generated_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group_generated_test.go index ac7be93192..3fdf91df4b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group_generated_test.go @@ -43,7 +43,7 @@ func TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupFunction ResourceName: "google_compute_region_network_endpoint_group.function_neg", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"region"}, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "region"}, }, }, }) @@ -106,7 +106,7 @@ func TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupCloudrun ResourceName: "google_compute_region_network_endpoint_group.cloudrun_neg", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"region"}, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "region"}, }, }, }) @@ -163,7 +163,7 @@ func TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupAppengin ResourceName: "google_compute_region_network_endpoint_group.appengine_neg", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"region"}, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "region"}, }, }, }) @@ -263,7 +263,7 @@ func TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupPscExamp ResourceName: "google_compute_region_network_endpoint_group.psc_neg", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"region"}, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "region"}, }, }, }) @@ -281,6 +281,103 @@ resource "google_compute_region_network_endpoint_group" "psc_neg" { `, context) } +func 
TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupPscServiceAttachmentExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionNetworkEndpointGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupPscServiceAttachmentExample(context), + }, + { + ResourceName: "google_compute_region_network_endpoint_group.psc_neg_service_attachment", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "region"}, + }, + }, + }) +} + +func testAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupPscServiceAttachmentExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_compute_network" "default" { + name = "tf-test-psc-network%{random_suffix}" +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-psc-subnetwork%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "europe-west4" + network = google_compute_network.default.id +} + +resource "google_compute_subnetwork" "psc_subnetwork" { + name = "tf-test-psc-subnetwork-nat%{random_suffix}" + ip_cidr_range = "10.1.0.0/16" + region = "europe-west4" + purpose = "PRIVATE_SERVICE_CONNECT" + network = google_compute_network.default.id +} + +resource "google_compute_health_check" "default" { + name = "tf-test-psc-healthcheck%{random_suffix}" + + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "80" + } +} +resource "google_compute_region_backend_service" "default" { + name = "tf-test-psc-backend%{random_suffix}" + region = "europe-west4" + + health_checks = [google_compute_health_check.default.id] +} + +resource "google_compute_forwarding_rule" "default" { + name = 
"tf-test-psc-forwarding-rule%{random_suffix}" + region = "europe-west4" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.default.id + all_ports = true + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.default.name +} + +resource "google_compute_service_attachment" "default" { + name = "tf-test-psc-service-attachment%{random_suffix}" + region = "europe-west4" + description = "A service attachment configured with Terraform" + + enable_proxy_protocol = false + connection_preference = "ACCEPT_AUTOMATIC" + nat_subnets = [google_compute_subnetwork.psc_subnetwork.self_link] + target_service = google_compute_forwarding_rule.default.self_link +} + +resource "google_compute_region_network_endpoint_group" "psc_neg_service_attachment" { + name = "tf-test-psc-neg%{random_suffix}" + region = "europe-west4" + + network_endpoint_type = "PRIVATE_SERVICE_CONNECT" + psc_target_service = google_compute_service_attachment.default.self_link + + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link +} +`, context) +} + func testAccCheckComputeRegionNetworkEndpointGroupDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_per_instance_config_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_per_instance_config_test.go index 1002ba6f09..7ddbae65c2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_per_instance_config_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_per_instance_config_test.go @@ -238,7 +238,7 @@ resource "google_compute_disk" "disk1" { name = 
"test-disk2-%{random_suffix}" type = "pd-ssd" zone = "us-central1-c" - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" physical_block_size_bytes = 4096 } @@ -255,7 +255,7 @@ resource "google_compute_disk" "disk2" { func testAccComputeRegionPerInstanceConfig_rigm(context map[string]interface{}) string { return Nprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go index 8dee902a4e..67f9b077cd 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go @@ -68,7 +68,7 @@ The chain must include at least one intermediate cert.`, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go index ddc3c983ed..3502adc366 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go @@ -94,7 +94,7 @@ exact number of VMs.`, }, }, }, - ConflictsWith: []string{"snapshot_schedule_policy", "instance_schedule_policy"}, + ConflictsWith: []string{"instance_schedule_policy", "snapshot_schedule_policy"}, }, "instance_schedule_policy": { Type: schema.TypeList, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy_test.go index f0b95e323c..a8ee8864a6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy_test.go @@ -30,7 +30,7 @@ func TestAccComputeResourcePolicy_attached(t *testing.T) { func testAccComputeResourcePolicy_attached(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_route_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_route_test.go index dcdbbcc923..8d3fb436da 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_route_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_route_test.go @@ -63,7 +63,7 @@ resource "google_compute_route" "foobar" { func testAccComputeRoute_hopInstance(instanceName, zone, suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go index 6568a43ef4..41f239f9ed 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go @@ -66,7 +66,7 @@ func resourceComputeRouter() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go index 6155485e90..f9937fcca6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go @@ -35,7 +35,7 @@ func resourceComputeSecurityPolicy() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the security policy.`, }, @@ -54,10 +54,11 @@ func resourceComputeSecurityPolicy() *schema.Resource { }, "type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache.`, + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). 
They filter requests before the request is served from Google's cache.`, + ValidateFunc: validation.StringInSlice([]string{"CLOUD_ARMOR", "CLOUD_ARMOR_EDGE", "CLOUD_ARMOR_INTERNAL_SERVICE"}, false), }, "rule": { @@ -202,10 +203,11 @@ func resourceComputeSecurityPolicy() *schema.Resource { }, "enforce_on_key": { - Type: schema.TypeString, - Optional: true, - Default: "ALL", - Description: `Determines the key to enforce the rateLimitThreshold on`, + Type: schema.TypeString, + Optional: true, + Default: "ALL", + Description: `Determines the key to enforce the rateLimitThreshold on`, + ValidateFunc: validation.StringInSlice([]string{"ALL", "IP", "HTTP_HEADER", "XFF_IP", "HTTP_COOKIE"}, false), }, "enforce_on_key_name": { @@ -531,6 +533,11 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdvancedOptionsConfig", "advancedOptionsConfig.jsonParsing", "advancedOptionsConfig.logLevel") } + if d.HasChange("adaptive_protection_config") { + securityPolicy.AdaptiveProtectionConfig = expandSecurityPolicyAdaptiveProtectionConfig(d.Get("adaptive_protection_config").([]interface{})) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdaptiveProtectionConfig", "adaptiveProtectionConfig.layer7DdosDefenseConfig.enable", "adaptiveProtectionConfig.layer7DdosDefenseConfig.ruleVisibility") + } + if len(securityPolicy.ForceSendFields) > 0 { client := config.NewComputeClient(userAgent) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_generated_test.go index ad32efa9ba..5c2633eb7f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_generated_test.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_generated_test.go @@ -61,7 +61,7 @@ resource "google_compute_snapshot" "snapshot" { } data "google_compute_image" "debian" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_test.go index 817b2149e5..ce40bb33cd 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_snapshot_test.go @@ -60,7 +60,7 @@ func TestAccComputeSnapshot_encryptionCMEK(t *testing.T) { func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go index 20e8967387..da5238a047 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go @@ -68,7 +68,7 @@ The chain must include at least one intermediate cert.`, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go index 59dd5e28cf..977e3f6615 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go @@ -83,7 +83,7 @@ non-overlapping within a network. Only IPv4 is supported.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters @@ -250,7 +250,7 @@ secondary IP ranges within a network. Only IPv4 is supported.`, "range_name": { Type: schema.TypeString, Required: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. 
The name diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go index 275601d15f..987aca14d1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go @@ -53,17 +53,6 @@ first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, }, - "ssl_certificates": { - Type: schema.TypeList, - Required: true, - Description: `A list of SslCertificate resources that are used to authenticate -connections between users and the load balancer. At least one SSL -certificate must be specified.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, "url_map": { Type: schema.TypeString, Required: true, @@ -71,6 +60,14 @@ certificate must be specified.`, Description: `A reference to the UrlMap resource that defines the mapping from URL to the BackendService.`, }, + "certificate_map": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to the CertificateMap resource uri that identifies a certificate map +associated with the given target proxy. This field can only be set for global target proxies. +Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}'.`, + ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, + }, "description": { Type: schema.TypeString, Optional: true, @@ -96,6 +93,18 @@ specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. 
Default value: "NONE" Possible values: ["NONE", "ENABLE", "DISABLE"]`, Default: "NONE", }, + "ssl_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `A list of SslCertificate resources that are used to authenticate +connections between users and the load balancer. At least one SSL +certificate must be specified.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, + }, "ssl_policy": { Type: schema.TypeString, Optional: true, @@ -161,6 +170,12 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { obj["sslCertificates"] = sslCertificatesProp } + certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(certificateMapProp)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) if err != nil { return err @@ -278,6 +293,9 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{ if err := d.Set("ssl_certificates", flattenComputeTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) } + if err := d.Set("certificate_map", flattenComputeTargetHttpsProxyCertificateMap(res["certificateMap"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } if err := d.Set("ssl_policy", flattenComputeTargetHttpsProxySslPolicy(res["sslPolicy"], d, config)); err != nil { return fmt.Errorf("Error reading 
TargetHttpsProxy: %s", err) } @@ -379,6 +397,40 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return err } } + if d.HasChange("certificate_map") { + obj := make(map[string]interface{}) + + certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } + + url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setCertificateMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = computeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } if d.HasChange("ssl_policy") { obj := make(map[string]interface{}) @@ -562,6 +614,10 @@ func flattenComputeTargetHttpsProxySslCertificates(v interface{}, d *schema.Reso return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) } +func flattenComputeTargetHttpsProxyCertificateMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenComputeTargetHttpsProxySslPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return v @@ -608,6 +664,10 @@ func expandComputeTargetHttpsProxySslCertificates(v interface{}, d TerraformReso return req, 
nil } +func expandComputeTargetHttpsProxyCertificateMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandComputeTargetHttpsProxySslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { f, err := parseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) if err != nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy_test.go index acf022fc60..f71923727e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy_test.go @@ -11,6 +11,7 @@ import ( const ( canonicalSslCertificateTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s" + canonicalCertificateMapTemplate = "//certificatemanager.googleapis.com/projects/%s/locations/global/certificateMaps/%s" ) func TestAccComputeTargetHttpsProxy_update(t *testing.T) { @@ -48,6 +49,30 @@ func TestAccComputeTargetHttpsProxy_update(t *testing.T) { }) } +func TestAccComputeTargetHttpsProxy_certificateMap(t *testing.T) { + t.Parallel() + + var proxy compute.TargetHttpsProxy + resourceSuffix := randString(t, 10) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetHttpsProxy_certificateMap(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyDescription("Resource created for 
Terraform acceptance testing", &proxy), + testAccComputeTargetHttpsProxyHasCertificateMap(t, "certificatemap-test-"+resourceSuffix, &proxy), + ), + }, + }, + }) +} + func testAccCheckComputeTargetHttpsProxyExists(t *testing.T, n string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -102,6 +127,19 @@ func testAccComputeTargetHttpsProxyHasSslCertificate(t *testing.T, cert string, } } +func testAccComputeTargetHttpsProxyHasCertificateMap(t *testing.T, certificateMap string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := googleProviderConfig(t) + certificateMapUrl := fmt.Sprintf(canonicalCertificateMapTemplate, config.Project, certificateMap) + + if ConvertSelfLinkToV1(proxy.CertificateMap) == certificateMapUrl { + return nil + } + + return fmt.Errorf("certificate map not found: expected'%s'", certificateMapUrl) + } +} + func testAccComputeTargetHttpsProxy_basic1(id string) string { return fmt.Sprintf(` resource "google_compute_target_https_proxy" "foobar" { @@ -238,3 +276,61 @@ resource "google_compute_ssl_certificate" "foobar2" { } `, id, id, id, id, id, id, id) } + +func testAccComputeTargetHttpsProxy_certificateMap(id string) string { + return fmt.Sprintf(` +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_url_map.foobar.self_link + certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.map.id}" +} + +resource "google_compute_backend_service" "foobar" { + name = "httpsproxy-test-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "httpsproxy-test-health-check-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource 
"google_compute_url_map" "foobar" { + name = "httpsproxy-test-url-map-%s" + default_service = google_compute_backend_service.foobar.self_link +} + +resource "google_certificate_manager_certificate_map" "map" { + name = "certificatemap-test-%s" +} + +resource "google_certificate_manager_certificate_map_entry" "map_entry" { + name = "certificatemapentry-test-%s" + map = google_certificate_manager_certificate_map.map.name + certificates = [google_certificate_manager_certificate.certificate.id] + matcher = "PRIMARY" +} + +resource "google_certificate_manager_certificate" "certificate" { + name = "certificate-test-%s" + scope = "DEFAULT" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + ] + } +} + +resource "google_certificate_manager_dns_authorization" "instance" { + name = "dnsauthorization-test-%s" + domain = "mysite.com" +} + +`, id, id, id, id, id, id, id, id) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_instance_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_instance_generated_test.go index cd9a69be36..c10eafc3de 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_instance_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_instance_generated_test.go @@ -56,7 +56,7 @@ resource "google_compute_target_instance" "default" { } data "google_compute_image" "vmimage" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_pool_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_pool_test.go index af3e51bce5..39878e51f7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_pool_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_pool_test.go @@ -150,7 +150,7 @@ func testAccCheckComputeTargetPoolHealthCheck(targetPool, healthCheck string) re func testAccComputeTargetPool_basic(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } @@ -210,7 +210,7 @@ resource "google_compute_instance" "foo" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -226,7 +226,7 @@ resource "google_compute_instance" "bar" { boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_gateway_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_gateway_generated_test.go index 3c55191367..124408795a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_gateway_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_gateway_generated_test.go @@ -51,12 +51,12 @@ func TestAccComputeVpnGateway_targetVpnGatewayBasicExample(t *testing.T) { func testAccComputeVpnGateway_targetVpnGatewayBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_vpn_gateway" "target_gateway" { - name = "vpn1%{random_suffix}" + name = "tf-test-vpn-1%{random_suffix}" network = google_compute_network.network1.id } resource "google_compute_network" 
"network1" { - name = "network1%{random_suffix}" + name = "tf-test-network-1%{random_suffix}" } resource "google_compute_address" "vpn_static_ip" { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_tunnel_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_tunnel_generated_test.go index edbc0a71c4..619eb4b92b 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_tunnel_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_vpn_tunnel_generated_test.go @@ -51,7 +51,7 @@ func TestAccComputeVpnTunnel_vpnTunnelBasicExample(t *testing.T) { func testAccComputeVpnTunnel_vpnTunnelBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_vpn_tunnel" "tunnel1" { - name = "tunnel1%{random_suffix}" + name = "tf-test-tunnel-1%{random_suffix}" peer_ip = "15.0.0.120" shared_secret = "a secret message" @@ -65,12 +65,12 @@ resource "google_compute_vpn_tunnel" "tunnel1" { } resource "google_compute_vpn_gateway" "target_gateway" { - name = "vpn1%{random_suffix}" + name = "tf-test-vpn-1%{random_suffix}" network = google_compute_network.network1.id } resource "google_compute_network" "network1" { - name = "network1%{random_suffix}" + name = "tf-test-network-1%{random_suffix}" } resource "google_compute_address" "vpn_static_ip" { @@ -140,7 +140,7 @@ func testAccComputeVpnTunnel_vpnTunnelBetaExample(context map[string]interface{} return Nprintf(` resource "google_compute_vpn_tunnel" "tunnel1" { provider = google-beta - name = "tunnel1%{random_suffix}" + name = "tf-test-tunnel-1%{random_suffix}" peer_ip = "15.0.0.120" shared_secret = "a secret message" @@ -159,13 +159,13 @@ resource "google_compute_vpn_tunnel" "tunnel1" { resource "google_compute_vpn_gateway" "target_gateway" { provider = google-beta - name = 
"vpn1%{random_suffix}" + name = "tf-test-vpn-1%{random_suffix}" network = google_compute_network.network1.id } resource "google_compute_network" "network1" { provider = google-beta - name = "network1%{random_suffix}" + name = "tf-test-network-1%{random_suffix}" } resource "google_compute_address" "vpn_static_ip" { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go index de8cda6c61..17589ac773 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go @@ -117,7 +117,6 @@ func resourceContainerAwsCluster() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Logging configuration.", MaxItems: 1, Elem: ContainerAwsClusterLoggingConfigSchema(), @@ -240,7 +239,6 @@ func ContainerAwsClusterControlPlaneSchema() *schema.Resource { "iam_instance_profile": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The name of the AWS IAM instance pofile to assign to each control plane replica.", }, @@ -543,7 +541,6 @@ func ContainerAwsClusterLoggingConfigSchema() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Configuration of the logging components.", MaxItems: 1, Elem: ContainerAwsClusterLoggingConfigComponentConfigSchema(), @@ -559,7 +556,6 @@ func ContainerAwsClusterLoggingConfigComponentConfigSchema() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Components of the logging configuration to be enabled.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -612,12 +608,12 @@ func resourceContainerAwsClusterCreate(d 
*schema.ResourceData, meta interface{}) Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -634,7 +630,7 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) } else { client.Config.BasePath = bp } - res, err := client.ApplyCluster(context.Background(), obj, createDirective...) + res, err := client.ApplyCluster(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -1361,7 +1357,6 @@ func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray } return items } - func expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum { objs := o.([]interface{}) items := make([]containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster_generated_test.go index ed206ffb81..b08d331f99 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster_generated_test.go @@ -299,11 +299,13 @@ resource "google_container_aws_cluster" "primary" { func testAccContainerAwsCluster_BetaBasicHandWritten(context map[string]interface{}) 
string { return Nprintf(` data "google_container_aws_versions" "versions" { + provider = google-beta project = "%{project_name}" location = "us-west1" } resource "google_container_aws_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "%{service_acct}" @@ -399,11 +401,13 @@ resource "google_container_aws_cluster" "primary" { func testAccContainerAwsCluster_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { return Nprintf(` data "google_container_aws_versions" "versions" { + provider = google-beta project = "%{project_name}" location = "us-west1" } resource "google_container_aws_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "%{service_acct}" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go index 93fc48f010..f512b39543 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go @@ -192,7 +192,6 @@ func ContainerAwsNodePoolConfigSchema() *schema.Resource { "iam_instance_profile": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The name of the AWS IAM role assigned to nodes in the pool.", }, @@ -218,6 +217,7 @@ func ContainerAwsNodePoolConfigSchema() *schema.Resource { Type: schema.TypeString, Computed: true, Optional: true, + ForceNew: true, Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", }, @@ -225,7 +225,7 @@ func ContainerAwsNodePoolConfigSchema() *schema.Resource { Type: schema.TypeMap, Optional: true, ForceNew: true, - Description: "Optional. The initial labels assigned to nodes of this node pool. 
An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -429,12 +429,12 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -451,7 +451,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyNodePool(context.Background(), obj, createDirective...) + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool_generated_test.go index 9057c45b97..fb8e52b958 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool_generated_test.go @@ -432,11 +432,13 @@ resource "google_container_aws_node_pool" "primary" { func testAccContainerAwsNodePool_BetaBasicHandWritten(context map[string]interface{}) string { return Nprintf(` data "google_container_aws_versions" "versions" { + provider = google-beta project = "%{project_name}" location = "us-west1" } resource "google_container_aws_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "%{service_acct}" @@ -517,6 +519,7 @@ resource "google_container_aws_cluster" "primary" { resource "google_container_aws_node_pool" "primary" { + provider = google-beta autoscaling { max_node_count = 5 min_node_count = 1 @@ -595,11 +598,13 @@ resource "google_container_aws_node_pool" "primary" { func testAccContainerAwsNodePool_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { return Nprintf(` data "google_container_aws_versions" "versions" { + provider = google-beta project = "%{project_name}" location = "us-west1" } resource "google_container_aws_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "%{service_acct}" @@ -679,6 +684,7 @@ resource "google_container_aws_cluster" "primary" { } resource "google_container_aws_node_pool" "primary" { + provider = google-beta autoscaling { max_node_count = 5 
min_node_count = 1 diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go index 772e2912a2..0b710d61c9 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go @@ -116,12 +116,12 @@ func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -138,7 +138,7 @@ func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyClient(context.Background(), obj, createDirective...) + res, err := client.ApplyClient(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go index 0894791d47..64f3bef45a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go @@ -132,7 +132,6 @@ func resourceContainerAzureCluster() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Logging configuration.", MaxItems: 1, Elem: ContainerAzureClusterLoggingConfigSchema(), @@ -230,7 +229,6 @@ func ContainerAzureClusterControlPlaneSchema() *schema.Resource { "ssh_config": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "SSH configuration for how to access the underlying control plane machines.", MaxItems: 1, Elem: ContainerAzureClusterControlPlaneSshConfigSchema(), @@ -319,7 +317,6 @@ func ContainerAzureClusterControlPlaneSshConfigSchema() *schema.Resource { "authorized_key": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The SSH public key data for VMs managed by Anthos. 
This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", }, }, @@ -464,7 +461,6 @@ func ContainerAzureClusterLoggingConfigSchema() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Configuration of the logging components.", MaxItems: 1, Elem: ContainerAzureClusterLoggingConfigComponentConfigSchema(), @@ -480,7 +476,6 @@ func ContainerAzureClusterLoggingConfigComponentConfigSchema() *schema.Resource Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Components of the logging configuration to be enabled.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -535,12 +530,12 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -557,7 +552,7 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ } else { client.Config.BasePath = bp } - res, err := client.ApplyCluster(context.Background(), obj, createDirective...) + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -1253,7 +1248,6 @@ func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArr } return items } - func expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum { objs := o.([]interface{}) items := make([]containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster_generated_test.go index fa0bc35798..d23fbe3ed3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster_generated_test.go @@ -258,9 +258,11 @@ func testAccContainerAzureCluster_BetaBasicHandWritten(context map[string]interf data "google_container_azure_versions" "versions" { project = "%{project_name}" location = "us-west1" + provider = google-beta } resource "google_container_azure_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "mmv2@google.com" @@ -303,6 +305,7 @@ resource "google_container_azure_cluster" "primary" { } resource "google_container_azure_client" "basic" { + provider = google-beta application_id = "%{azure_app}" location = "us-west1" name = "tf-test-client-name%{random_suffix}" @@ -319,9 +322,11 @@ func testAccContainerAzureCluster_BetaBasicHandWrittenUpdate0(context map[string data "google_container_azure_versions" "versions" { project = "%{project_name}" location = "us-west1" + provider = google-beta } resource 
"google_container_azure_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "mmv2@google.com" @@ -398,6 +403,7 @@ resource "google_container_azure_cluster" "primary" { } resource "google_container_azure_client" "basic" { + provider = google-beta application_id = "%{azure_app}" location = "us-west1" name = "tf-test-client-name%{random_suffix}" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go index d2677c2ba0..fb6c696bf4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go @@ -48,7 +48,6 @@ func resourceContainerAzureNodePool() *schema.Resource { "autoscaling": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "Autoscaler configuration for this node pool.", MaxItems: 1, Elem: ContainerAzureNodePoolAutoscalingSchema(), @@ -65,7 +64,6 @@ func resourceContainerAzureNodePool() *schema.Resource { "config": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "The node configuration of the node pool.", MaxItems: 1, Elem: ContainerAzureNodePoolConfigSchema(), @@ -176,14 +174,12 @@ func ContainerAzureNodePoolAutoscalingSchema() *schema.Resource { "max_node_count": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: "Maximum number of nodes in the node pool. Must be >= min_node_count.", }, "min_node_count": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: "Minimum number of nodes in the node pool. 
Must be >= 1 and <= max_node_count.", }, }, @@ -196,7 +192,6 @@ func ContainerAzureNodePoolConfigSchema() *schema.Resource { "ssh_config": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "SSH configuration for how to access the node pool machines.", MaxItems: 1, Elem: ContainerAzureNodePoolConfigSshConfigSchema(), @@ -254,7 +249,6 @@ func ContainerAzureNodePoolConfigSshConfigSchema() *schema.Resource { "authorized_key": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", }, }, @@ -329,12 +323,12 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -351,7 +345,7 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface } else { client.Config.BasePath = bp } - res, err := client.ApplyNodePool(context.Background(), obj, createDirective...) + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool_generated_test.go index fbcb89ab6a..0b63d95215 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool_generated_test.go @@ -312,9 +312,11 @@ func testAccContainerAzureNodePool_BetaBasicHandWritten(context map[string]inter data "google_container_azure_versions" "versions" { project = "%{project_name}" location = "us-west1" + provider = google-beta } resource "google_container_azure_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "mmv2@google.com" @@ -351,6 +353,7 @@ resource "google_container_azure_cluster" "primary" { } resource "google_container_azure_client" "basic" { + provider = google-beta application_id = "%{azure_app}" location = "us-west1" name = "tf-test-client-name%{random_suffix}" @@ -359,6 +362,7 @@ resource "google_container_azure_client" "basic" { } resource "google_container_azure_node_pool" "primary" { + provider = google-beta autoscaling { max_node_count = 3 min_node_count = 2 @@ -413,12 +417,14 @@ resource "google_container_azure_node_pool" "primary" { func testAccContainerAzureNodePool_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { return Nprintf(` data "google_container_azure_versions" "versions" { + provider = google-beta project = "%{project_name}" location = "us-west1" } resource "google_container_azure_cluster" "primary" { + provider = google-beta authorization { admin_users { username = "mmv2@google.com" @@ -455,6 
+461,7 @@ resource "google_container_azure_cluster" "primary" { } resource "google_container_azure_client" "basic" { + provider = google-beta application_id = "%{azure_app}" location = "us-west1" name = "tf-test-client-name%{random_suffix}" @@ -463,6 +470,7 @@ resource "google_container_azure_client" "basic" { } resource "google_container_azure_node_pool" "primary" { + provider = google-beta autoscaling { max_node_count = 3 min_node_count = 2 diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go old mode 100644 new mode 100755 index ab85197c05..50a1e83763 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go @@ -60,9 +60,9 @@ var ( "addons_config.0.network_policy_config", "addons_config.0.cloudrun_config", "addons_config.0.gcp_filestore_csi_driver_config", - "addons_config.0.istio_config", "addons_config.0.dns_cache_config", "addons_config.0.gce_persistent_disk_csi_driver_config", + "addons_config.0.istio_config", "addons_config.0.kalm_config", "addons_config.0.config_connector_config", "addons_config.0.gke_backup_agent_config", @@ -288,31 +288,6 @@ func resourceContainerCluster() *schema.Resource { }, }, }, - "istio_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the Istio addon.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disabled": { - Type: schema.TypeBool, - Required: true, - Description: `The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. 
Set disabled = false to enable.`, - }, - "auth": { - Type: schema.TypeString, - Optional: true, - // We can't use a Terraform-level default because it won't be true when the block is disabled: true - DiffSuppressFunc: emptyOrDefaultStringSuppress("AUTH_NONE"), - ValidateFunc: validation.StringInSlice([]string{"AUTH_NONE", "AUTH_MUTUAL_TLS"}, false), - Description: `The authentication type between services in Istio. Available options include AUTH_MUTUAL_TLS.`, - }, - }, - }, - }, "dns_cache_config": { Type: schema.TypeList, Optional: true, @@ -336,7 +311,7 @@ func resourceContainerCluster() *schema.Resource { Computed: true, AtLeastOneOf: addonsConfigKeys, MaxItems: 1, - Description: `Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to disabled; set enabled = true to enable.`, + Description: `Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to enabled; set disabled = true to disable.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -346,6 +321,31 @@ func resourceContainerCluster() *schema.Resource { }, }, }, + "istio_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Istio addon.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + Description: `The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. 
Set disabled = false to enable.`, + }, + "auth": { + Type: schema.TypeString, + Optional: true, + // We can't use a Terraform-level default because it won't be true when the block is disabled: true + DiffSuppressFunc: emptyOrDefaultStringSuppress("AUTH_NONE"), + ValidateFunc: validation.StringInSlice([]string{"AUTH_NONE", "AUTH_MUTUAL_TLS"}, false), + Description: `The authentication type between services in Istio. Available options include AUTH_MUTUAL_TLS.`, + }, + }, + }, + }, "kalm_config": { Type: schema.TypeList, Optional: true, @@ -473,6 +473,12 @@ func resourceContainerCluster() *schema.Resource { DiffSuppressFunc: emptyOrDefaultStringSuppress("automatic"), Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell.`, }, + "boot_disk_kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, + }, }, }, }, @@ -505,11 +511,38 @@ func resourceContainerCluster() *schema.Resource { }, "enable_binary_authorization": { - Default: false, Type: schema.TypeBool, Optional: true, + Default: false, + Deprecated: "Deprecated in favor of binary_authorization.", Description: `Enable Binary Authorization for this cluster. 
If enabled, all container images will be validated by Google Binary Authorization.`, - ConflictsWith: []string{"enable_autopilot"}, + ConflictsWith: []string{"enable_autopilot", "binary_authorization"}, + }, + "binary_authorization": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: BinaryAuthorizationDiffSuppress, + MaxItems: 1, + Description: "Configuration options for the Binary Authorization feature.", + ConflictsWith: []string{"enable_binary_authorization"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Deprecated: "Deprecated in favor of evaluation_mode.", + Description: "Enable Binary Authorization for this cluster.", + ConflictsWith: []string{"enable_autopilot", "binary_authorization.0.evaluation_mode"}, + }, + "evaluation_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"}, false), + Description: "Mode of operation for Binary Authorization policy evaluation.", + ConflictsWith: []string{"binary_authorization.0.enabled"}, + }, + }, + }, }, "enable_kubernetes_alpha": { @@ -555,7 +588,6 @@ func resourceContainerCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - ForceNew: true, MaxItems: 1, Description: `Configuration for the Google Groups for GKE feature.`, Elem: &schema.Resource{ @@ -563,7 +595,6 @@ func resourceContainerCluster() *schema.Resource { "security_group": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.`, }, }, @@ -723,10 +754,10 @@ func resourceContainerCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - Description: `GKE components exposing metrics. 
Valid values include SYSTEM_COMPONENTS and WORKLOADS.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "WORKLOADS"}, false), + ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), }, }, "managed_prometheus": { @@ -1088,14 +1119,14 @@ func resourceContainerCluster() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, + Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.`, }, "enable_private_nodes": { Type: schema.TypeBool, Optional: true, ForceNew: true, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.`, + Description: `Enables the private cluster feature, creating a private endpoint on the cluster. 
In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, }, "master_ipv4_cidr_block": { Type: schema.TypeString, @@ -1216,6 +1247,23 @@ func resourceContainerCluster() *schema.Resource { }, }, + "mesh_certificates": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `If set, and enable_certificates=true, the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_certificates": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + }, + }, + }, + }, + "database_encryption": { Type: schema.TypeList, MaxItems: 1, @@ -1503,10 +1551,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er IpAllocationPolicy: ipAllocationBlock, PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), - BinaryAuthorization: &container.BinaryAuthorization{ - Enabled: d.Get("enable_binary_authorization").(bool), - ForceSendFields: []string{"Enabled"}, - }, + BinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization"), d.Get("enable_binary_authorization").(bool)), Autopilot: &container.Autopilot{ Enabled: d.Get("enable_autopilot").(bool), ForceSendFields: []string{"Enabled"}, @@ -1614,6 +1659,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v) } + if v, ok := d.GetOk("mesh_certificates"); ok { + cluster.MeshCertificates = expandMeshCertificates(v) + } + if v, ok := d.GetOk("database_encryption"); ok { cluster.DatabaseEncryption = expandDatabaseEncryption(v) } @@ -1856,8 +1905,17 @@ func 
resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("cluster_autoscaling", flattenClusterAutoscaling(cluster.Autoscaling)); err != nil { return err } - if err := d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled); err != nil { - return fmt.Errorf("Error setting enable_binary_authorization: %s", err) + binauthz_enabled := d.Get("binary_authorization.0.enabled").(bool) + legacy_binauthz_enabled := d.Get("enable_binary_authorization").(bool) + if !binauthz_enabled { + if err := d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled); err != nil { + return fmt.Errorf("Error setting enable_binary_authorization: %s", err) + } + } + if !legacy_binauthz_enabled { + if err := d.Set("binary_authorization", flattenBinaryAuthorization(cluster.BinaryAuthorization)); err != nil { + return err + } } if cluster.Autopilot != nil { if err := d.Set("enable_autopilot", cluster.Autopilot.Enabled); err != nil { @@ -1948,6 +2006,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro return err } + if err := d.Set("mesh_certificates", flattenMeshCertificates(cluster.MeshCertificates)); err != nil { + return err + } + if err := d.Set("database_encryption", flattenDatabaseEncryption(cluster.DatabaseEncryption)); err != nil { return err } @@ -2098,6 +2160,22 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), enabled) } + if d.HasChange("binary_authorization") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredBinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization"), d.Get("enable_binary_authorization").(bool)), + }, + } + + updateF := updateFunc(req, "updating GKE binary authorization") + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), req.Update.DesiredBinaryAuthorization) + } + if d.HasChange("enable_shielded_nodes") { enabled := d.Get("enable_shielded_nodes").(bool) req := &container.UpdateClusterRequest{ @@ -2255,6 +2333,21 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s L4 ILB Subsetting has been updated to %v", d.Id(), enabled) } + if d.HasChange("authenticator_groups_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredAuthenticatorGroupsConfig: expandContainerClusterAuthenticatorGroupsConfig(d.Get("authenticator_groups_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster authenticator groups config") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s authenticator groups config has been updated", d.Id()) + } + if d.HasChange("default_snat_status") { req := &container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ @@ -2660,6 +2753,33 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("mesh_certificates") { + c := d.Get("mesh_certificates") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMeshCertificates: expandMeshCertificates(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return containerOperationWait(config, op, project, location, "updating 
GKE cluster mesh certificates config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s mesh certificates config has been updated", d.Id()) + } + if d.HasChange("database_encryption") { c := d.Get("database_encryption") req := &container.UpdateClusterRequest{ @@ -3115,15 +3235,6 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig { } } - if v, ok := config["istio_config"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.IstioConfig = &container.IstioConfig{ - Disabled: addon["disabled"].(bool), - Auth: addon["auth"].(string), - ForceSendFields: []string{"Disabled"}, - } - } - if v, ok := config["dns_cache_config"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) ac.DnsCacheConfig = &container.DnsCacheConfig{ @@ -3140,6 +3251,15 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig { } } + if v, ok := config["istio_config"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + ac.IstioConfig = &container.IstioConfig{ + Disabled: addon["disabled"].(bool), + Auth: addon["auth"].(string), + ForceSendFields: []string{"Disabled"}, + } + } + if v, ok := config["kalm_config"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) ac.KalmConfig = &container.KalmConfig{ @@ -3346,6 +3466,7 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa OauthScopes: convertStringArr(config["oauth_scopes"].([]interface{})), ServiceAccount: config["service_account"].(string), ImageType: config["image_type"].(string), + BootDiskKmsKey: config["boot_disk_kms_key"].(string), } cpu := config["min_cpu_platform"].(string) @@ -3402,6 +3523,21 @@ func expandNotificationConfig(configured interface{}) *container.NotificationCon } } +func 
expandBinaryAuthorization(configured interface{}, legacy_enabled bool) *container.BinaryAuthorization { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return &container.BinaryAuthorization{ + Enabled: legacy_enabled, + ForceSendFields: []string{"Enabled"}, + } + } + config := l[0].(map[string]interface{}) + return &container.BinaryAuthorization{ + Enabled: config["enabled"].(bool), + EvaluationMode: config["evaluation_mode"].(string), + } +} + func expandConfidentialNodes(configured interface{}) *container.ConfidentialNodes { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -3518,6 +3654,18 @@ func expandVerticalPodAutoscaling(configured interface{}) *container.VerticalPod } } +func expandMeshCertificates(configured interface{}) *container.MeshCertificates { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.MeshCertificates{ + EnableCertificates: config["enable_certificates"].(bool), + ForceSendFields: []string{"EnableCertificates"}, + } +} + func expandDatabaseEncryption(configured interface{}) *container.DatabaseEncryption { l := configured.([]interface{}) if len(l) == 0 { @@ -3695,6 +3843,18 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig return mc } +func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.AuthenticatorGroupsConfig{ + SecurityGroup: config["security_group"].(string), + } +} + func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} { if c == nil { return nil @@ -3712,6 +3872,17 @@ func flattenNotificationConfig(c *container.NotificationConfig) []map[string]int } } +func flattenBinaryAuthorization(c *container.BinaryAuthorization) []map[string]interface{} 
{ + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + "evaluation_mode": c.EvaluationMode, + }) + } + return result +} + func flattenConfidentialNodes(c *container.ConfidentialNodes) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { @@ -3785,15 +3956,6 @@ func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interfac result["cloudrun_config"] = []map[string]interface{}{cloudRunConfig} } - if c.IstioConfig != nil { - result["istio_config"] = []map[string]interface{}{ - { - "disabled": c.IstioConfig.Disabled, - "auth": c.IstioConfig.Auth, - }, - } - } - if c.DnsCacheConfig != nil { result["dns_cache_config"] = []map[string]interface{}{ { @@ -3810,6 +3972,15 @@ func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interfac } } + if c.IstioConfig != nil { + result["istio_config"] = []map[string]interface{}{ + { + "disabled": c.IstioConfig.Disabled, + "auth": c.IstioConfig.Auth, + }, + } + } + if c.KalmConfig != nil { result["kalm_config"] = []map[string]interface{}{ { @@ -4102,6 +4273,7 @@ func flattenAutoProvisioningDefaults(a *container.AutoprovisioningNodePoolDefaul r["service_account"] = a.ServiceAccount r["image_type"] = a.ImageType r["min_cpu_platform"] = a.MinCpuPlatform + r["boot_disk_kms_key"] = a.BootDiskKmsKey return []map[string]interface{}{r} } @@ -4160,6 +4332,17 @@ func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) [] } } +func flattenMeshCertificates(c *container.MeshCertificates) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enable_certificates": c.EnableCertificates, + }, + } +} + func flattenDatabaseEncryption(c *container.DatabaseEncryption) []map[string]interface{} { if c == nil { return nil @@ -4426,3 +4609,15 @@ func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.Res return false } + +func 
BinaryAuthorizationDiffSuppress(k, old, new string, r *schema.ResourceData) bool { + // An empty config is equivalent to a config with enabled set to false. + if k == "binary_authorization.#" && old == "1" && new == "0" { + o, _ := r.GetChange("binary_authorization.0.enabled") + if !o.(bool) && !r.HasChange("binary_authorization.0.evaluation_mode") { + return true + } + } + + return false +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go old mode 100644 new mode 100755 index 277722069a..3cd3e3e74b --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster_test.go @@ -182,15 +182,16 @@ func TestAccContainerCluster_withAddons(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, }, - { - Config: testAccContainerCluster_withInternalLoadBalancer(pid, clusterName), - }, - { - ResourceName: "google_container_cluster.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"min_master_version"}, - }, + // Issue with cloudrun_config addon: https://github.com/hashicorp/terraform-provider-google/issues/11943 + // { + // Config: testAccContainerCluster_withInternalLoadBalancer(pid, clusterName), + // }, + // { + // ResourceName: "google_container_cluster.primary", + // ImportState: true, + // ImportStateVerify: true, + // ImportStateVerifyIgnore: []string{"min_master_version"}, + // }, }, }) } @@ -349,7 +350,6 @@ func TestAccContainerCluster_withMasterAuthConfig_NoCert(t *testing.T) { func TestAccContainerCluster_withAuthenticatorGroupsConfig(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) - 
containerNetName := fmt.Sprintf("tf-test-container-net-%s", randString(t, 10)) orgDomain := getTestOrgDomainFromEnv(t) vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -357,10 +357,38 @@ func TestAccContainerCluster_withAuthenticatorGroupsConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerCluster_withAuthenticatorGroupsConfig(containerNetName, clusterName, orgDomain), + Config: testAccContainerCluster_basic(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.primary", + "authenticator_groups_config.0.enabled"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerCluster_withAuthenticatorGroupsConfigUpdate(clusterName, orgDomain), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", + "authenticator_groups_config.0.security_group", fmt.Sprintf("gke-security-groups@%s", orgDomain)), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, }, { - ResourceName: "google_container_cluster.with_authenticator_groups", + Config: testAccContainerCluster_withAuthenticatorGroupsConfigUpdate2(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.primary", + "authenticator_groups_config.0.enabled"), + ), + }, + { + ResourceName: "google_container_cluster.primary", ImportState: true, ImportStateVerify: true, }, @@ -2200,7 +2228,100 @@ func TestAccContainerCluster_sharedVpc(t *testing.T) { }) } -func TestAccContainerCluster_withBinaryAuthorization(t *testing.T) { +func TestAccContainerCluster_withBinaryAuthorizationEnabledBool(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + + 
vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBinaryAuthorizationEnabledBool(clusterName, true), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_enabled_bool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"enable_binary_authorization"}, + }, + { + Config: testAccContainerCluster_withBinaryAuthorizationEnabledBool(clusterName, false), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_enabled_bool", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerCluster_withBinaryAuthorizationEnabledBoolLegacy(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBinaryAuthorizationEnabledBoolLegacy(clusterName, true), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_enabled_bool_legacy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"enable_binary_authorization", "binary_authorization.#", "binary_authorization.0.%", "binary_authorization.0.enabled", "binary_authorization.0.evaluation_mode"}, + }, + { + Config: testAccContainerCluster_withBinaryAuthorizationEnabledBoolLegacy(clusterName, false), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_enabled_bool_legacy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"enable_binary_authorization"}, + }, + }, + }) +} + +func TestAccContainerCluster_withBinaryAuthorizationEvaluationModeAutopilot(t 
*testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, true, "PROJECT_SINGLETON_POLICY_ENFORCE"), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, true, "DISABLED"), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerCluster_withBinaryAuthorizationEvaluationModeClassic(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) @@ -2211,18 +2332,18 @@ func TestAccContainerCluster_withBinaryAuthorization(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerCluster_withBinaryAuthorization(clusterName, true), + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, false, "PROJECT_SINGLETON_POLICY_ENFORCE"), }, { - ResourceName: "google_container_cluster.with_binary_authorization", + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", ImportState: true, ImportStateVerify: true, }, { - Config: testAccContainerCluster_withBinaryAuthorization(clusterName, false), + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, false, "DISABLED"), }, { - ResourceName: "google_container_cluster.with_binary_authorization", + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", ImportState: true, 
ImportStateVerify: true, }, @@ -2286,6 +2407,30 @@ func TestAccContainerCluster_nodeAutoprovisioningDefaultsImageType(t *testing.T) }) } +func TestAccContainerCluster_nodeAutoprovisioningDefaultsBootDiskKmsKey(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + kms := BootstrapKMSKeyInLocation(t, "us-central1") + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsBootDiskKmsKey(getTestProjectFromEnv(), clusterName, kms.CryptoKey.Name), + }, + { + ResourceName: "google_container_cluster.nap_boot_disk_kms_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + }, + }) +} + func TestAccContainerCluster_errorCleanDanglingCluster(t *testing.T) { t.Parallel() @@ -2340,6 +2485,48 @@ func TestAccContainerCluster_errorNoClusterCreated(t *testing.T) { }) } +func TestAccContainerCluster_withMeshCertificatesConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + pid := getTestProjectFromEnv() + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withMeshCertificatesConfigEnabled(pid, clusterName), + }, + { + ResourceName: "google_container_cluster.with_mesh_certificates_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, + }, + { + Config: testAccContainerCluster_updateMeshCertificatesConfig(pid, clusterName, true), + }, + { + ResourceName: "google_container_cluster.with_mesh_certificates_config", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, + }, + { + Config: testAccContainerCluster_updateMeshCertificatesConfig(pid, clusterName, false), + }, + { + ResourceName: "google_container_cluster.with_mesh_certificates_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, + }, + }, + }) +} + func TestAccContainerCluster_withDatabaseEncryption(t *testing.T) { t.Parallel() @@ -2722,8 +2909,10 @@ resource "google_container_cluster" "primary" { enabled = true } + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } enable_intranode_visibility = true - enable_binary_authorization = true } `, name) } @@ -2753,8 +2942,10 @@ resource "google_container_cluster" "primary" { enabled = true } + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } enable_intranode_visibility = true - enable_binary_authorization = true } `, name) } @@ -2792,16 +2983,16 @@ resource "google_container_cluster" "primary" { cloudrun_config { disabled = true } - istio_config { - disabled = true - auth = "AUTH_MUTUAL_TLS" - } dns_cache_config { enabled = false } gce_persistent_disk_csi_driver_config { enabled = false } + istio_config { + disabled = true + auth = "AUTH_MUTUAL_TLS" + } kalm_config { enabled = false } @@ -2847,18 +3038,20 @@ resource "google_container_cluster" "primary" { enabled = true } cloudrun_config { - disabled = false - } - istio_config { - disabled = false - auth = "AUTH_NONE" + # https://github.com/hashicorp/terraform-provider-google/issues/11943 + # disabled = false + disabled = true } dns_cache_config { enabled = true } gce_persistent_disk_csi_driver_config { enabled = true - } + } + istio_config { + disabled = false + auth = "AUTH_NONE" + } kalm_config { enabled = true } @@ -2873,41 +3066,42 @@ resource "google_container_cluster" "primary" { `, projectID, clusterName) } -func testAccContainerCluster_withInternalLoadBalancer(projectID 
string, clusterName string) string { - return fmt.Sprintf(` -data "google_project" "project" { - project_id = "%s" -} - -resource "google_container_cluster" "primary" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - - min_master_version = "latest" - - workload_identity_config { - workload_pool = "${data.google_project.project.project_id}.svc.id.goog" - } - - addons_config { - http_load_balancing { - disabled = false - } - horizontal_pod_autoscaling { - disabled = false - } - network_policy_config { - disabled = false - } - cloudrun_config { - disabled = false - load_balancer_type = "LOAD_BALANCER_TYPE_INTERNAL" - } - } -} -`, projectID, clusterName) -} +// Issue with cloudrun_config addon: https://github.com/hashicorp/terraform-provider-google/issues/11943/ +// func testAccContainerCluster_withInternalLoadBalancer(projectID string, clusterName string) string { +// return fmt.Sprintf(` +// data "google_project" "project" { +// project_id = "%s" +// } + +// resource "google_container_cluster" "primary" { +// name = "%s" +// location = "us-central1-a" +// initial_node_count = 1 + +// min_master_version = "latest" + +// workload_identity_config { +// workload_pool = "${data.google_project.project.project_id}.svc.id.goog" +// } + +// addons_config { +// http_load_balancing { +// disabled = false +// } +// horizontal_pod_autoscaling { +// disabled = false +// } +// network_policy_config { +// disabled = false +// } +// cloudrun_config { +// disabled = false +// load_balancer_type = "LOAD_BALANCER_TYPE_INTERNAL" +// } +// } +// } +// `, projectID, clusterName) +// } func testAccContainerCluster_withNotificationConfig(clusterName string, topic string) string { return fmt.Sprintf(` @@ -3150,49 +3344,32 @@ resource "google_container_cluster" "with_network_policy_enabled" { `, clusterName) } -func testAccContainerCluster_withAuthenticatorGroupsConfig(containerNetName string, clusterName string, orgDomain string) string { +func 
testAccContainerCluster_withAuthenticatorGroupsConfigUpdate(name string, orgDomain string) string { return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "container_subnetwork" { - name = google_compute_network.container_network.name - network = google_compute_network.container_network.name - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "pod" - ip_cidr_range = "10.0.0.0/19" - } +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 - secondary_ip_range { - range_name = "svc" - ip_cidr_range = "10.0.32.0/22" - } + authenticator_groups_config { + security_group = "gke-security-groups@%s" + } +} +`, name, orgDomain) } -resource "google_container_cluster" "with_authenticator_groups" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - network = google_compute_network.container_network.name - subnetwork = google_compute_subnetwork.container_subnetwork.name - - authenticator_groups_config { - security_group = "gke-security-groups@%s" - } +func testAccContainerCluster_withAuthenticatorGroupsConfigUpdate2(name string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 - networking_mode = "VPC_NATIVE" - ip_allocation_policy { - cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name - services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name - } + authenticator_groups_config { + security_group = "" + } } -`, containerNetName, clusterName, orgDomain) +`, name) } func testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName string, cidrs []string, emptyValue string) string { @@ 
-4043,6 +4220,43 @@ resource "google_container_cluster" "with_autoprovisioning" { }`, cluster, imageTypeCfg) } +func testAccContainerCluster_autoprovisioningDefaultsBootDiskKmsKey(project, clusterName, kmsKeyName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_project_iam_member" "kms-project-binding" { + project = data.google_project.project.project_id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@compute-system.iam.gserviceaccount.com" +} + +resource "google_container_cluster" "nap_boot_disk_kms_key" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + release_channel { + channel = "RAPID" + } + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + auto_provisioning_defaults { + boot_disk_kms_key = "%s" + } + } +} +`, project, clusterName, kmsKeyName) +} + func testAccContainerCluster_withNodePoolAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { @@ -4795,9 +5009,23 @@ resource "google_container_cluster" "shared_vpc_cluster" { `, projectName, org, billingId, projectName, org, billingId, suffix, suffix, name) } -func testAccContainerCluster_withBinaryAuthorization(clusterName string, enabled bool) string { +func testAccContainerCluster_withBinaryAuthorizationEnabledBool(clusterName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_binary_authorization_enabled_bool" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + binary_authorization { + enabled = %v + } +} +`, clusterName, enabled) +} + +func testAccContainerCluster_withBinaryAuthorizationEnabledBoolLegacy(clusterName string, enabled bool) string { return fmt.Sprintf(` -resource 
"google_container_cluster" "with_binary_authorization" { +resource "google_container_cluster" "with_binary_authorization_enabled_bool_legacy" { name = "%s" location = "us-central1-a" initial_node_count = 1 @@ -4807,6 +5035,23 @@ resource "google_container_cluster" "with_binary_authorization" { `, clusterName, enabled) } +func testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName string, autopilot_enabled bool, evaluation_mode string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_binary_authorization_evaluation_mode" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + ip_allocation_policy { + } + enable_autopilot = %v + + binary_authorization { + evaluation_mode = "%s" + } +} +`, clusterName, autopilot_enabled, evaluation_mode) +} + func testAccContainerCluster_withFlexiblePodCIDR(containerNetName string, clusterName string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { @@ -4923,6 +5168,47 @@ resource "google_container_cluster" "with_resource_labels" { `, location) } +func testAccContainerCluster_withMeshCertificatesConfigEnabled(projectID string, clusterName string) string { + return fmt.Sprintf(` + data "google_project" "project" { + project_id = "%s" + } + + resource "google_container_cluster" "with_mesh_certificates_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + mesh_certificates { + enable_certificates = true + } + } +`, projectID, clusterName) +} + +func testAccContainerCluster_updateMeshCertificatesConfig(projectID string, clusterName string, enabled bool) string { + return fmt.Sprintf(` + data "google_project" "project" { + project_id = "%s" + } + + resource "google_container_cluster" "with_mesh_certificates_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 
1 + remove_default_node_pool = true + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + mesh_certificates { + enable_certificates = %v + } + }`, projectID, clusterName, enabled) +} + func testAccContainerCluster_withDatabaseEncryption(clusterName string, kmsData bootstrappedKMS) string { return fmt.Sprintf(` data "google_project" "project" { @@ -5191,7 +5477,7 @@ resource "google_container_cluster" "primary" { location = "us-central1-a" initial_node_count = 1 monitoring_config { - enable_components = [ "SYSTEM_COMPONENTS" ] + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER" ] } } `, name) @@ -5204,7 +5490,7 @@ resource "google_container_cluster" "primary" { location = "us-central1-a" initial_node_count = 1 monitoring_config { - enable_components = [ "SYSTEM_COMPONENTS", "WORKLOADS" ] + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS" ] } } `, name) @@ -5217,7 +5503,7 @@ resource "google_container_cluster" "primary" { location = "us-central1-a" initial_node_count = 1 monitoring_config { - enable_components = [ "SYSTEM_COMPONENTS", "WORKLOADS" ] + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS" ] managed_prometheus { enabled = true } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_node_pool_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_node_pool_test.go index 5124445dd1..f6e19424d8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_node_pool_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_node_pool_test.go @@ -560,7 +560,7 @@ func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) { }) } -//This test exists to validate 
a regional node pool *and* and update to it. +// This test exists to validate a regional node pool *and* and update to it. func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { t.Parallel() @@ -943,7 +943,6 @@ resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 - min_master_version = "1.19" } resource "google_container_node_pool" "np" { @@ -992,7 +991,6 @@ resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 - min_master_version = "1.19" } resource "google_container_node_pool" "np" { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go index a378619859..24780eb4aa 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go @@ -211,6 +211,7 @@ func resourceDataflowJob() *schema.Resource { "additional_experiments": { Type: schema.TypeSet, Optional: true, + Computed: true, Description: `List of experiments that should be used by the job. 
An example value is ["enable_stackdriver_agent_metrics"].`, Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job_test.go index 3ee91131c6..24c42a721d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job_test.go @@ -41,6 +41,12 @@ func TestAccDataflowJob_basic(t *testing.T) { testAccDataflowJobExists(t, "google_dataflow_job.big_data"), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, }, }) } @@ -67,6 +73,12 @@ func TestAccDataflowJobSkipWait_basic(t *testing.T) { testAccDataflowJobExists(t, "google_dataflow_job.big_data"), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, }, }) } @@ -92,6 +104,12 @@ func TestAccDataflowJob_withRegion(t *testing.T) { testAccRegionalDataflowJobExists(t, "google_dataflow_job.big_data", "us-central1"), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "region", "state"}, + }, }, }) } @@ -119,6 +137,12 @@ func TestAccDataflowJob_withServiceAccount(t *testing.T) { testAccDataflowJobHasServiceAccount(t, "google_dataflow_job.big_data", accountId), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, }, }) } @@ -146,6 +170,12 @@ func TestAccDataflowJob_withNetwork(t *testing.T) { testAccDataflowJobHasNetwork(t, "google_dataflow_job.big_data", network), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, }, }) } @@ -174,6 +204,12 @@ func TestAccDataflowJob_withSubnetwork(t *testing.T) { testAccDataflowJobHasSubnetwork(t, "google_dataflow_job.big_data", subnetwork), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "subnetwork", "state"}, + }, }, }) } @@ -202,6 +238,12 @@ func TestAccDataflowJob_withLabels(t *testing.T) { testAccDataflowJobHasLabels(t, "google_dataflow_job.with_labels", key), ), }, + { + ResourceName: "google_dataflow_job.with_labels", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, }, }) } @@ -227,6 +269,12 @@ func TestAccDataflowJob_withIpConfig(t *testing.T) { testAccDataflowJobExists(t, "google_dataflow_job.big_data"), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "ip_configuration", "state"}, + }, }, }) } @@ -255,6 +303,12 @@ func TestAccDataflowJob_withKmsKey(t *testing.T) { testAccDataflowJobExists(t, "google_dataflow_job.big_data"), ), }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, }, }) } @@ 
-281,6 +335,12 @@ func TestAccDataflowJobWithAdditionalExperiments(t *testing.T) { testAccDataflowJobHasExperiments(t, "google_dataflow_job.with_additional_experiments", additionalExperiments), ), }, + { + ResourceName: "google_dataflow_job.with_additional_experiments", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, }, }) } @@ -309,6 +369,12 @@ func TestAccDataflowJob_streamUpdate(t *testing.T) { testAccDataflowJobHasTempLocation(t, "google_dataflow_job.pubsub_stream", "gs://tf-test-bucket2-"+suffix), ), }, + { + ResourceName: "google_dataflow_job.pubsub_stream", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "transform_name_mapping", "state"}, + }, }, }) } @@ -342,6 +408,12 @@ func TestAccDataflowJob_virtualUpdate(t *testing.T) { resource.TestCheckResourceAttr("google_dataflow_job.pubsub_stream", "on_delete", "cancel"), ), }, + { + ResourceName: "google_dataflow_job.pubsub_stream", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, }, }) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset.go new file mode 100644 index 0000000000..6734c76c19 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset.go @@ -0,0 +1,859 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) 
+// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +) + +func resourceDataplexAsset() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexAssetCreate, + Read: resourceDataplexAssetRead, + Update: resourceDataplexAssetUpdate, + Delete: resourceDataplexAssetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexAssetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataplex_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The zone for the resource", + }, + + "discovery_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Specification of the discovery feature applied to data referenced by this asset. 
When this spec is left unset, the asset will use the spec set on the parent zone.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The name of the asset.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Specification of the resource that is referenced by this asset.", + MaxItems: 1, + Elem: DataplexAssetResourceSpecSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the asset.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User defined labels for the asset.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was created.", + }, + + "discovery_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the discovery feature applied to data referenced by this asset.", + Elem: DataplexAssetDiscoveryStatusSchema(), + }, + + "resource_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Status of the resource referenced by this asset.", + Elem: DataplexAssetResourceStatusSchema(), + }, + + "security_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the security policy applied to resource referenced by this asset.", + Elem: DataplexAssetSecurityStatusSchema(), + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was last updated.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. 
For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexAssetDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. 
The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexAssetResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}`", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "last_run_duration": { + Type: schema.TypeString, + Computed: true, + Description: "The duration of the last discovery run.", + }, + + "last_run_time": { + Type: schema.TypeString, + Computed: true, + Description: "The start time of the last discovery run.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current status of the discovery feature. 
Possible values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED", + }, + + "stats": { + Type: schema.TypeList, + Computed: true, + Description: "Data Stats of the asset reported by discovery.", + Elem: DataplexAssetDiscoveryStatusStatsSchema(), + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusStatsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_items": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of data items within the referenced resource.", + }, + + "data_size": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of stored data bytes within the referenced resource.", + }, + + "filesets": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of fileset entities within the referenced resource.", + }, + + "tables": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of table entities within the referenced resource.", + }, + }, + } +} + +func DataplexAssetResourceStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the managed resource. 
Possible values: STATE_UNSPECIFIED, READY, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetSecurityStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the security policy applied to the attached resource. Possible values: STATE_UNSPECIFIED, READY, APPLYING, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := 
NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return 
fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetAsset(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexAsset %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("dataplex_zone", res.DataplexZone); err != nil { + return fmt.Errorf("error setting dataplex_zone in state: %s", err) + } + if err = d.Set("discovery_spec", flattenDataplexAssetDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexAssetResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("discovery_status", flattenDataplexAssetDiscoveryStatus(res.DiscoveryStatus)); err != nil { + return fmt.Errorf("error setting discovery_status in state: %s", err) + } + if err = 
d.Set("resource_status", flattenDataplexAssetResourceStatus(res.ResourceStatus)); err != nil { + return fmt.Errorf("error setting resource_status in state: %s", err) + } + if err = d.Set("security_status", flattenDataplexAssetSecurityStatus(res.SecurityStatus)); err != nil { + return fmt.Errorf("error setting security_status in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return 
fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Asset %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := 
client.DeleteAsset(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Asset: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Asset %q", d.Id()) + return nil +} + +func resourceDataplexAssetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/lakes/(?P<lake>[^/]+)/zones/(?P<dataplex_zone>[^/]+)/assets/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<lake>[^/]+)/(?P<dataplex_zone>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<lake>[^/]+)/(?P<dataplex_zone>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexAssetDiscoverySpec(o interface{}) *dataplex.AssetDiscoverySpec { + if o == nil { + return dataplex.EmptyAssetDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexAssetDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: expandStringArray(obj["exclude_patterns"]), + IncludePatterns: expandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexAssetDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.String(obj["schedule"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpec(obj *dataplex.AssetDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexAssetDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": 
obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexAssetDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecCsvOptions(o interface{}) *dataplex.AssetDiscoverySpecCsvOptions { + if o == nil { + return dataplex.EmptyAssetDiscoverySpecCsvOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetDiscoverySpecCsvOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexAssetDiscoverySpecCsvOptions(obj *dataplex.AssetDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecJsonOptions(o interface{}) *dataplex.AssetDiscoverySpecJsonOptions { + if o == nil { + return dataplex.EmptyAssetDiscoverySpecJsonOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetDiscoverySpecJsonOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpecJsonOptions(obj *dataplex.AssetDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + 
"disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetResourceSpec(o interface{}) *dataplex.AssetResourceSpec { + if o == nil { + return dataplex.EmptyAssetResourceSpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetResourceSpec{ + Type: dataplex.AssetResourceSpecTypeEnumRef(obj["type"].(string)), + Name: dcl.String(obj["name"].(string)), + } +} + +func flattenDataplexAssetResourceSpec(obj *dataplex.AssetResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "type": obj.Type, + "name": obj.Name, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatus(obj *dataplex.AssetDiscoveryStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "last_run_duration": obj.LastRunDuration, + "last_run_time": obj.LastRunTime, + "message": obj.Message, + "state": obj.State, + "stats": flattenDataplexAssetDiscoveryStatusStats(obj.Stats), + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatusStats(obj *dataplex.AssetDiscoveryStatusStats) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "data_items": obj.DataItems, + "data_size": obj.DataSize, + "filesets": obj.Filesets, + "tables": obj.Tables, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetResourceStatus(obj *dataplex.AssetResourceStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func 
flattenDataplexAssetSecurityStatus(obj *dataplex.AssetSecurityStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset_generated_test.go new file mode 100644 index 0000000000..cc57c05389 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset_generated_test.go @@ -0,0 +1,234 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccDataplexAsset_BasicAssetHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDataplexAssetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexAsset_BasicAssetHandWritten(context), + }, + { + ResourceName: "google_dataplex_asset.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_spec.0.name"}, + }, + { + Config: testAccDataplexAsset_BasicAssetHandWrittenUpdate0(context), + }, + { + ResourceName: "google_dataplex_asset.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_spec.0.name"}, + }, + }, + }) +} + +func testAccDataplexAsset_BasicAssetHandWritten(context map[string]interface{}) string { + return Nprintf(` +resource "google_storage_bucket" "basic_bucket" { + name = "dataplex-bucket-%{random_suffix}" + location = "%{region}" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "tf-test-lake%{random_suffix}" + location = 
"%{region}" + project = "%{project_name}" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "tf-test-zone%{random_suffix}" + location = "%{region}" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "%{project_name}" +} + + +resource "google_dataplex_asset" "primary" { + name = "tf-test-asset%{random_suffix}" + location = "%{region}" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = "projects/%{project_name}/buckets/dataplex-bucket-%{random_suffix}" + type = "STORAGE_BUCKET" + } + + project = "%{project_name}" + depends_on = [ + google_storage_bucket.basic_bucket + ] +} +`, context) +} + +func testAccDataplexAsset_BasicAssetHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_storage_bucket" "basic_bucket" { + name = "dataplex-bucket-%{random_suffix}" + location = "%{region}" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "tf-test-lake%{random_suffix}" + location = "%{region}" + project = "%{project_name}" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "tf-test-zone%{random_suffix}" + location = "%{region}" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "%{project_name}" +} + + +resource "google_dataplex_asset" "primary" { + name = "tf-test-asset%{random_suffix}" + location = "%{region}" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = 
"projects/%{project_name}/buckets/dataplex-bucket-%{random_suffix}" + type = "STORAGE_BUCKET" + } + + project = "%{project_name}" + depends_on = [ + google_storage_bucket.basic_bucket + ] +} +`, context) +} + +func testAccCheckDataplexAssetDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_dataplex_asset" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(rs.Primary.Attributes["dataplex_zone"]), + Lake: dcl.String(rs.Primary.Attributes["lake"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: dataplex.AssetStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLDataplexClient(config, config.userAgent, billingProject, 0) + _, err := client.GetAsset(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_asset still exists %v", obj) + } + } + return nil + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go index 69fbc14617..ec74a868d1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go @@ -225,12 +225,12 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -247,7 +247,7 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error } else { client.Config.BasePath = bp } - res, err := client.ApplyLake(context.Background(), obj, createDirective...) + res, err := client.ApplyLake(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone.go new file mode 100644 index 0000000000..b0a1a12e75 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone.go @@ -0,0 +1,688 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +) + +func resourceDataplexZone() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexZoneCreate, + Read: resourceDataplexZoneRead, + Update: resourceDataplexZoneUpdate, + Delete: resourceDataplexZoneDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexZoneImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "discovery_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Specification of the discovery feature applied to data in this zone.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The name of the zone.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Immutable. 
Specification of the resources that are referenced by the assets within this zone.", + MaxItems: 1, + Elem: DataplexZoneResourceSpecSchema(), + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. The type of the zone. Possible values: TYPE_UNSPECIFIED, RAW, CURATED", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the zone.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User defined labels for the zone.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "asset_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Aggregated status of the underlying assets of the zone.", + Elem: DataplexZoneAssetStatusSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the zone was created.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the zone. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The time when the zone was last updated.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. 
To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexZoneDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexZoneResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. 
Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION", + }, + }, + } +} + +func DataplexZoneAssetStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of active assets.", + }, + + "security_policy_applying_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, 
billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetZone(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexZone %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("discovery_spec", flattenDataplexZoneDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexZoneResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("type", res.Type); err != nil { + return fmt.Errorf("error setting type in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("asset_status", flattenDataplexZoneAssetStatus(res.AssetStatus)); err != nil { + return fmt.Errorf("error setting asset_status in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", 
err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Zone %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteZone(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Zone: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Zone %q", d.Id()) + return nil +} + +func 
resourceDataplexZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexZoneDiscoverySpec(o interface{}) *dataplex.ZoneDiscoverySpec { + if o == nil { + return dataplex.EmptyZoneDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexZoneDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: expandStringArray(obj["exclude_patterns"]), + IncludePatterns: expandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexZoneDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.StringOrNil(obj["schedule"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpec(obj *dataplex.ZoneDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexZoneDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexZoneDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecCsvOptions(o 
interface{}) *dataplex.ZoneDiscoverySpecCsvOptions { + if o == nil { + return dataplex.EmptyZoneDiscoverySpecCsvOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneDiscoverySpecCsvOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexZoneDiscoverySpecCsvOptions(obj *dataplex.ZoneDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecJsonOptions(o interface{}) *dataplex.ZoneDiscoverySpecJsonOptions { + if o == nil { + return dataplex.EmptyZoneDiscoverySpecJsonOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneDiscoverySpecJsonOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpecJsonOptions(obj *dataplex.ZoneDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneResourceSpec(o interface{}) *dataplex.ZoneResourceSpec { + if o == nil { + return dataplex.EmptyZoneResourceSpec + } + objArr := 
o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneResourceSpec{ + LocationType: dataplex.ZoneResourceSpecLocationTypeEnumRef(obj["location_type"].(string)), + } +} + +func flattenDataplexZoneResourceSpec(obj *dataplex.ZoneResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location_type": obj.LocationType, + } + + return []interface{}{transformed} + +} + +func flattenDataplexZoneAssetStatus(obj *dataplex.ZoneAssetStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "active_assets": obj.ActiveAssets, + "security_policy_applying_assets": obj.SecurityPolicyApplyingAssets, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone_generated_test.go new file mode 100644 index 0000000000..ff39eef41b --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone_generated_test.go @@ -0,0 +1,184 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccDataplexZone_BasicZone(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDataplexZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexZone_BasicZone(context), + }, + { + ResourceName: "google_dataplex_zone.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDataplexZone_BasicZoneUpdate0(context), + }, + { + ResourceName: "google_dataplex_zone.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDataplexZone_BasicZone(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "%{region}" + name = "tf-test-zone%{random_suffix}" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL" + display_name = "Zone for DCL" + labels = {} + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + 
display_name = "Lake for DCL" + + labels = { + my-lake = "exists" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccDataplexZone_BasicZoneUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "%{region}" + name = "tf-test-zone%{random_suffix}" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL Updated" + display_name = "Zone for DCL" + + labels = { + updated_label = "exists" + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + + labels = { + my-lake = "exists" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckDataplexZoneDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_dataplex_zone" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Zone{ + Lake: dcl.String(rs.Primary.Attributes["lake"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Type: dataplex.ZoneTypeEnumRef(rs.Primary.Attributes["type"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: dataplex.ZoneStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), 
+ UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLDataplexClient(config, config.userAgent, billingProject, 0) + _, err := client.GetZone(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_zone still exists %v", obj) + } + } + return nil + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_autoscaling_policy_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_autoscaling_policy_generated_test.go index 645f890bf4..b8cc57042e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_autoscaling_policy_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_autoscaling_policy_generated_test.go @@ -23,6 +23,53 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) +func TestAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDataprocAutoscalingPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyBasicExample(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + }, + }) +} + +func testAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyBasicExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_autoscaling_policy" "basic" { + policy_id = "tf-test-dataproc-policy%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + 
basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} +`, context) +} + func TestAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyExample(t *testing.T) { t.Parallel() diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go index 1066ca1e43..30fd6bf662 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go @@ -60,9 +60,9 @@ var ( "cluster_config.0.initialization_action", "cluster_config.0.encryption_config", "cluster_config.0.autoscaling_config", + "cluster_config.0.metastore_config", "cluster_config.0.lifecycle_config", "cluster_config.0.endpoint_config", - "cluster_config.0.metastore_config", } ) @@ -636,6 +636,23 @@ by Dataproc`, }, }, }, + "metastore_config": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, + Description: `Specifies a Metastore configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_metastore_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource name of an existing Dataproc Metastore service.`, + }, + }, + }, + }, "lifecycle_config": { Type: schema.TypeList, Optional: true, @@ -698,23 +715,6 @@ by Dataproc`, }, }, }, - "metastore_config": { - Type: schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - MaxItems: 1, - Description: `Specifies a Metastore configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataproc_metastore_service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name of an existing Dataproc 
Metastore service.`, - }, - }, - }, - }, }, }, }, @@ -889,6 +889,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err } cluster.Config, err = expandClusterConfig(d, config) + if err != nil { return err } @@ -974,6 +975,10 @@ func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.Clus conf.AutoscalingConfig = expandAutoscalingConfig(cfg) } + if cfg, ok := configOptions(d, "cluster_config.0.metastore_config"); ok { + conf.MetastoreConfig = expandMetastoreConfig(cfg) + } + if cfg, ok := configOptions(d, "cluster_config.0.lifecycle_config"); ok { conf.LifecycleConfig = expandLifecycleConfig(cfg) } @@ -982,10 +987,6 @@ func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.Clus conf.EndpointConfig = expandEndpointConfig(cfg) } - if cfg, ok := configOptions(d, "cluster_config.0.metastore_config"); ok { - conf.MetastoreConfig = expandMetastoreConfig(cfg) - } - if cfg, ok := configOptions(d, "cluster_config.0.master_config"); ok { log.Println("[INFO] got master_config") conf.MasterConfig = expandInstanceGroupConfig(cfg) @@ -1433,7 +1434,10 @@ func resourceDataprocClusterRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error setting labels: %s", err) } - cfg, err := flattenClusterConfig(d, cluster.Config) + var cfg []map[string]interface{} + + cfg, err = flattenClusterConfig(d, cluster.Config) + if err != nil { return err } @@ -1460,9 +1464,9 @@ func flattenClusterConfig(d *schema.ResourceData, cfg *dataproc.ClusterConfig) ( "autoscaling_config": flattenAutoscalingConfig(d, cfg.AutoscalingConfig), "security_config": flattenSecurityConfig(d, cfg.SecurityConfig), "preemptible_worker_config": flattenPreemptibleInstanceGroupConfig(d, cfg.SecondaryWorkerConfig), + "metastore_config": flattenMetastoreConfig(d, cfg.MetastoreConfig), "lifecycle_config": flattenLifecycleConfig(d, cfg.LifecycleConfig), "endpoint_config": flattenEndpointConfig(d, cfg.EndpointConfig), - "metastore_config": 
flattenMetastoreConfig(d, cfg.MetastoreConfig), } if len(cfg.InitializationActions) > 0 { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation.go new file mode 100644 index 0000000000..1fa44c1a45 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation.go @@ -0,0 +1,511 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceDataprocMetastoreFederation() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocMetastoreFederationCreate, + Read: resourceDataprocMetastoreFederationRead, + Update: resourceDataprocMetastoreFederationUpdate, + Delete: resourceDataprocMetastoreFederationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocMetastoreFederationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_metastores": { + Type: schema.TypeSet, + Required: true, + Description: `A map from BackendMetastore rank to BackendMetastores from which 
the federation service serves metadata at query time. The map key represents the order in which BackendMetastores should be evaluated to resolve database names at query time and should be greater than or equal to zero. A BackendMetastore with a lower number will be evaluated before a BackendMetastore with a higher number.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rank": { + Type: schema.TypeString, + Required: true, + }, + "metastore_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateEnum([]string{"METASTORE_TYPE_UNSPECIFIED", "DATAPROC_METASTORE"}), + Description: `The type of the backend metastore. Possible values: ["METASTORE_TYPE_UNSPECIFIED", "DATAPROC_METASTORE"]`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `The relative resource name of the metastore that is being federated.`, + }, + }, + }, + }, + "federation_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the metastore federation. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), +and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between +3 and 63 characters.`, + }, + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apache Hive metastore version of the federation. 
All backend metastore versions must be compatible with the federation version.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the metastore federation.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the metastore federation should reside.`, + }, + "endpoint_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the endpoint used to access the metastore federation.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The relative resource name of the metastore federation.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the metastore federation.`, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Additional information about the current state of the metastore federation, if available.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The globally unique resource identifier of the metastore federation.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataprocMetastoreFederationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandDataprocMetastoreFederationLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + versionProp, err := expandDataprocMetastoreFederationVersion(d.Get("version"), d, config) + if err != nil { + return err + } else if v, 
ok := d.GetOkExists("version"); !isEmptyValue(reflect.ValueOf(versionProp)) && (ok || !reflect.DeepEqual(v, versionProp)) { + obj["version"] = versionProp + } + backendMetastoresProp, err := expandDataprocMetastoreFederationBackendMetastores(d.Get("backend_metastores"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_metastores"); !isEmptyValue(reflect.ValueOf(backendMetastoresProp)) && (ok || !reflect.DeepEqual(v, backendMetastoresProp)) { + obj["backendMetastores"] = backendMetastoresProp + } + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations?federationId={{federation_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Federation: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Federation: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = dataprocMetastoreOperationWaitTime( + config, res, project, "Creating Federation", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Federation: %s", err) + } + + log.Printf("[DEBUG] Finished creating Federation %q: %#v", d.Id(), res) + + return resourceDataprocMetastoreFederationRead(d, meta) +} + +func 
resourceDataprocMetastoreFederationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DataprocMetastoreFederation %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + + if err := d.Set("name", flattenDataprocMetastoreFederationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("labels", flattenDataprocMetastoreFederationLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("endpoint_uri", flattenDataprocMetastoreFederationEndpointUri(res["endpointUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("state", flattenDataprocMetastoreFederationState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("state_message", flattenDataprocMetastoreFederationStateMessage(res["stateMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("uid", 
flattenDataprocMetastoreFederationUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("version", flattenDataprocMetastoreFederationVersion(res["version"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("backend_metastores", flattenDataprocMetastoreFederationBackendMetastores(res["backendMetastores"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + + return nil +} + +func resourceDataprocMetastoreFederationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandDataprocMetastoreFederationLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + backendMetastoresProp, err := expandDataprocMetastoreFederationBackendMetastores(d.Get("backend_metastores"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_metastores"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backendMetastoresProp)) { + obj["backendMetastores"] = backendMetastoresProp + } + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Federation %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if 
d.HasChange("backend_metastores") { + updateMask = append(updateMask, "backendMetastores") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Federation %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Federation %q: %#v", d.Id(), res) + } + + err = dataprocMetastoreOperationWaitTime( + config, res, project, "Updating Federation", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDataprocMetastoreFederationRead(d, meta) +} + +func resourceDataprocMetastoreFederationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Federation %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != 
nil {
+ return handleNotFoundError(err, d, "Federation")
+ }
+
+ err = dataprocMetastoreOperationWaitTime(
+ config, res, project, "Deleting Federation", userAgent,
+ d.Timeout(schema.TimeoutDelete))
+
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[DEBUG] Finished deleting Federation %q: %#v", d.Id(), res)
+ return nil
+}
+
+func resourceDataprocMetastoreFederationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+ config := meta.(*Config)
+ if err := parseImportId([]string{
+ "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/federations/(?P<federation_id>[^/]+)",
+ "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<federation_id>[^/]+)",
+ "(?P<location>[^/]+)/(?P<federation_id>[^/]+)",
+ }, d, config); err != nil {
+ return nil, err
+ }
+
+ // Replace import id for the resource id
+ id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/federations/{{federation_id}}")
+ if err != nil {
+ return nil, fmt.Errorf("Error constructing id: %s", err)
+ }
+ d.SetId(id)
+
+ return []*schema.ResourceData{d}, nil
+}
+
+func flattenDataprocMetastoreFederationName(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationEndpointUri(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationState(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationStateMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationUid(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+ return v
+}
+
+func flattenDataprocMetastoreFederationBackendMetastores(v interface{},
d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "rank": k, + "name": flattenDataprocMetastoreFederationBackendMetastoresName(original["name"], d, config), + "metastore_type": flattenDataprocMetastoreFederationBackendMetastoresMetastoreType(original["metastoreType"], d, config), + }) + } + return transformed +} +func flattenDataprocMetastoreFederationBackendMetastoresName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationBackendMetastoresMetastoreType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandDataprocMetastoreFederationLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocMetastoreFederationVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreFederationBackendMetastores(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataprocMetastoreFederationBackendMetastoresName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + 
transformedMetastoreType, err := expandDataprocMetastoreFederationBackendMetastoresMetastoreType(original["metastore_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetastoreType); val.IsValid() && !isEmptyValue(val) { + transformed["metastoreType"] = transformedMetastoreType + } + + transformedRank, err := expandString(original["rank"], d, config) + if err != nil { + return nil, err + } + m[transformedRank] = transformed + } + return m, nil +} + +func expandDataprocMetastoreFederationBackendMetastoresName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreFederationBackendMetastoresMetastoreType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation_generated_test.go new file mode 100644 index 0000000000..ebea7713ac --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation_generated_test.go @@ -0,0 +1,112 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccDataprocMetastoreFederation_dataprocMetastoreFederationBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckDataprocMetastoreFederationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreFederation_dataprocMetastoreFederationBasicExample(context), + }, + { + ResourceName: "google_dataproc_metastore_federation.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"federation_id", "location"}, + }, + }, + }) +} + +func testAccDataprocMetastoreFederation_dataprocMetastoreFederationBasicExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_federation" "default" { + provider = google-beta + location = "us-central1" + federation_id = "tf-test-fed-1%{random_suffix}" + version = "3.1.2" + + backend_metastores { + rank = "1" + name = google_dataproc_metastore_service.default.id + metastore_type = "DATAPROC_METASTORE" + } +} + +resource "google_dataproc_metastore_service" "default" { + provider = google-beta + service_id = "tf-test-fed-1%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + + hive_metastore_config { + version = "3.1.2" + endpoint_protocol = "GRPC" + } +} +`, context) +} + +func testAccCheckDataprocMetastoreFederationDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dataproc_metastore_federation" { 
+ continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("DataprocMetastoreFederation still exists at %s", url) + } + } + + return nil + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation_sweeper_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation_sweeper_test.go new file mode 100644 index 0000000000..24c7f34ff4 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation_sweeper_test.go @@ -0,0 +1,128 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("DataprocMetastoreFederation", &resource.Sweeper{ + Name: "DataprocMetastoreFederation", + F: testSweepDataprocMetastoreFederation, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataprocMetastoreFederation(region string) error { + resourceName := "DataprocMetastoreFederation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://metastore.googleapis.com/v1beta/projects/{{project}}/locations/{{location}}/federations", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["federations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := 
resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://metastore.googleapis.com/v1beta/projects/{{project}}/locations/{{location}}/federations/{{federation_id}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go index cc77379b3a..80c811a2d4 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go @@ -50,6 +50,14 @@ func resourceDataprocMetastoreService() *schema.Resource { and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.`, }, + "database_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"MYSQL", "SPANNER", ""}), + Description: `The database type that the Metastore service stores its data. Default value: "MYSQL" Possible values: ["MYSQL", "SPANNER"]`, + Default: "MYSQL", + }, "encryption_config": { Type: schema.TypeList, Optional: true, @@ -81,6 +89,35 @@ Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/c ForceNew: true, Description: `The Hive metastore schema version.`, }, + "auxiliary_versions": { + Type: schema.TypeSet, + Optional: true, + Description: `A mapping of Hive metastore version to the auxiliary version configuration. +When specified, a secondary Hive metastore service is created along with the primary service. +All auxiliary versions must be less than the service's primary version. +The key is the auxiliary service name and it must match the regular expression a-z?. +This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `The Hive metastore version of the auxiliary service. 
It must be less than the primary Hive metastore service's version.`, + }, + "config_overrides": { + Type: schema.TypeMap, + Optional: true, + Description: `A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. +If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, "config_overrides": { Type: schema.TypeMap, Computed: true, @@ -90,6 +127,14 @@ Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/c The mappings override system defaults (some keys cannot be overridden)`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "endpoint_protocol": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"THRIFT", "GRPC", ""}), + Description: `The protocol to use for the metastore service endpoint. If unspecified, defaults to 'THRIFT'. Default value: "THRIFT" Possible values: ["THRIFT", "GRPC"]`, + Default: "THRIFT", + }, "kerberos_config": { Type: schema.TypeList, Optional: true, @@ -140,7 +185,7 @@ The mappings override system defaults (some keys cannot be overridden)`, Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `The location where the autoscaling policy should reside. + Description: `The location where the metastore service should reside. The default value is 'global'.`, Default: "global", }, @@ -148,7 +193,8 @@ The default value is 'global'.`, Type: schema.TypeList, Optional: true, Description: `The one hour maintenance window of the metastore service. -This specifies when the service can be restarted for maintenance purposes in UTC time.`, +This specifies when the service can be restarted for maintenance purposes in UTC time. 
+Maintenance window is not needed for services with the 'SPANNER' database type.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -166,6 +212,31 @@ This specifies when the service can be restarted for maintenance purposes in UTC }, }, }, + "metadata_integration": { + Type: schema.TypeList, + Optional: true, + Description: `The setting that defines how metastore metadata should be integrated with external services and systems.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_catalog_config": { + Type: schema.TypeList, + Required: true, + Description: `The integration config for the Data Catalog service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.`, + }, + }, + }, + }, + }, + }, + }, "network": { Type: schema.TypeString, Computed: true, @@ -181,6 +252,14 @@ This specifies when the service can be restarted for maintenance purposes in UTC Optional: true, Description: `The TCP port at which the metastore service is reached. Default: 9083.`, }, + "release_channel": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"CANARY", "STABLE", ""}), + Description: `The release channel of the service. If unspecified, defaults to 'STABLE'. 
Default value: "STABLE" Possible values: ["CANARY", "STABLE"]`, + Default: "STABLE", + }, "tier": { Type: schema.TypeString, Computed: true, @@ -213,6 +292,11 @@ This specifies when the service can be restarted for maintenance purposes in UTC Computed: true, Description: `Additional information about the current state of the metastore service, if available.`, }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The globally unique resource identifier of the metastore service.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -274,6 +358,24 @@ func resourceDataprocMetastoreServiceCreate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("hive_metastore_config"); !isEmptyValue(reflect.ValueOf(hiveMetastoreConfigProp)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp } + databaseTypeProp, err := expandDataprocMetastoreServiceDatabaseType(d.Get("database_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_type"); !isEmptyValue(reflect.ValueOf(databaseTypeProp)) && (ok || !reflect.DeepEqual(v, databaseTypeProp)) { + obj["databaseType"] = databaseTypeProp + } + releaseChannelProp, err := expandDataprocMetastoreServiceReleaseChannel(d.Get("release_channel"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("release_channel"); !isEmptyValue(reflect.ValueOf(releaseChannelProp)) && (ok || !reflect.DeepEqual(v, releaseChannelProp)) { + obj["releaseChannel"] = releaseChannelProp + } + metadataIntegrationProp, err := expandDataprocMetastoreServiceMetadataIntegration(d.Get("metadata_integration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata_integration"); !isEmptyValue(reflect.ValueOf(metadataIntegrationProp)) && (ok || !reflect.DeepEqual(v, metadataIntegrationProp)) { + obj["metadataIntegration"] = metadataIntegrationProp + } url, err := 
replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services?serviceId={{service_id}}") if err != nil { @@ -391,6 +493,18 @@ func resourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface if err := d.Set("hive_metastore_config", flattenDataprocMetastoreServiceHiveMetastoreConfig(res["hiveMetastoreConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } + if err := d.Set("database_type", flattenDataprocMetastoreServiceDatabaseType(res["databaseType"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("release_channel", flattenDataprocMetastoreServiceReleaseChannel(res["releaseChannel"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("uid", flattenDataprocMetastoreServiceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("metadata_integration", flattenDataprocMetastoreServiceMetadataIntegration(res["metadataIntegration"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } return nil } @@ -447,6 +561,12 @@ func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("hive_metastore_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp } + metadataIntegrationProp, err := expandDataprocMetastoreServiceMetadataIntegration(d.Get("metadata_integration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata_integration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataIntegrationProp)) { + obj["metadataIntegration"] = metadataIntegrationProp + } url, err := replaceVars(d, config, 
"{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") if err != nil { @@ -479,6 +599,10 @@ func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interfa if d.HasChange("hive_metastore_config") { updateMask = append(updateMask, "hiveMetastoreConfig") } + + if d.HasChange("metadata_integration") { + updateMask = append(updateMask, "metadataIntegration") + } // updateMask is a URL parameter but not present in the schema, so replaceVars // won't set it url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -686,14 +810,22 @@ func flattenDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d *schema return nil } transformed := make(map[string]interface{}) + transformed["endpoint_protocol"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(original["endpointProtocol"], d, config) transformed["version"] = flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) transformed["config_overrides"] = flattenDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(original["configOverrides"], d, config) transformed["kerberos_config"] = flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(original["kerberosConfig"], d, config) + transformed["auxiliary_versions"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(original["auxiliaryVersions"], d, config) return []interface{}{transformed} } +func flattenDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -744,6 +876,72 @@ func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigG return v } +func flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "key": k, + "version": flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(original["version"], d, config), + "config_overrides": flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(original["configOverrides"], d, config), + }) + } + return transformed +} +func flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceDatabaseType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceReleaseChannel(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceMetadataIntegration(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["data_catalog_config"] = + flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(original["dataCatalogConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func expandDataprocMetastoreServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -833,6 +1031,13 @@ func expandDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d Terrafor original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformedEndpointProtocol, err := expandDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(original["endpoint_protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpointProtocol); val.IsValid() && !isEmptyValue(val) { + transformed["endpointProtocol"] = transformedEndpointProtocol + } + transformedVersion, err := expandDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) if err != nil { return nil, err @@ -854,9 +1059,20 @@ func expandDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d Terrafor transformed["kerberosConfig"] = transformedKerberosConfig } + transformedAuxiliaryVersions, err := expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(original["auxiliary_versions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuxiliaryVersions); val.IsValid() && !isEmptyValue(val) { + transformed["auxiliaryVersions"] = transformedAuxiliaryVersions + } + return transformed, nil } +func expandDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -935,3 +1151,100 @@ func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(v func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } + +func expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedConfigOverrides, err := expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(original["config_overrides"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigOverrides); val.IsValid() && !isEmptyValue(val) { + transformed["configOverrides"] = transformedConfigOverrides + } + + transformedKey, err := expandString(original["key"], d, config) + if err != nil { + return nil, err + } + m[transformedKey] = transformed + } + return m, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func 
expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocMetastoreServiceDatabaseType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceReleaseChannel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceMetadataIntegration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataCatalogConfig, err := expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(original["data_catalog_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataCatalogConfig); val.IsValid() && !isEmptyValue(val) { + transformed["dataCatalogConfig"] = transformedDataCatalogConfig + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { + transformed["enabled"] = 
transformedEnabled + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service_generated_test.go index f29709e659..3b15dfd677 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service_generated_test.go @@ -32,7 +32,7 @@ func TestAccDataprocMetastoreService_dataprocMetastoreServiceBasicExample(t *tes vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), Steps: []resource.TestStep{ { @@ -51,7 +51,6 @@ func TestAccDataprocMetastoreService_dataprocMetastoreServiceBasicExample(t *tes func testAccDataprocMetastoreService_dataprocMetastoreServiceBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_dataproc_metastore_service" "default" { - provider = google-beta service_id = "tf-test-metastore-srv%{random_suffix}" location = "us-central1" port = 9080 @@ -78,7 +77,7 @@ func TestAccDataprocMetastoreService_dataprocMetastoreServiceCmekTestExample(t * vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), Steps: []resource.TestStep{ { @@ -96,17 +95,12 @@ func TestAccDataprocMetastoreService_dataprocMetastoreServiceCmekTestExample(t * func 
testAccDataprocMetastoreService_dataprocMetastoreServiceCmekTestExample(context map[string]interface{}) string { return Nprintf(` -data "google_project" "project" { - provider = google-beta -} +data "google_project" "project" {} -data "google_storage_project_service_account" "gcs_account" { - provider = google-beta -} +data "google_storage_project_service_account" "gcs_account" {} resource "google_dataproc_metastore_service" "default" { - provider = google-beta service_id = "tf-test-example-service%{random_suffix}" location = "us-central1" @@ -122,7 +116,6 @@ resource "google_dataproc_metastore_service" "default" { } resource "google_kms_crypto_key" "crypto_key" { - provider = google-beta name = "tf-test-example-key%{random_suffix}" key_ring = google_kms_key_ring.key_ring.id @@ -130,13 +123,11 @@ resource "google_kms_crypto_key" "crypto_key" { } resource "google_kms_key_ring" "key_ring" { - provider = google-beta name = "tf-test-example-keyring%{random_suffix}" location = "us-central1" } resource "google_kms_crypto_key_iam_binding" "crypto_key_binding" { - provider = google-beta crypto_key_id = google_kms_crypto_key.crypto_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" @@ -148,6 +139,137 @@ resource "google_kms_crypto_key_iam_binding" "crypto_key_binding" { `, context) } +func TestAccDataprocMetastoreService_dataprocMetastoreServiceEndpointExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreService_dataprocMetastoreServiceEndpointExample(context), + }, + { + ResourceName: "google_dataproc_metastore_service.endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id", "location"}, + }, + }, + 
}) +} + +func testAccDataprocMetastoreService_dataprocMetastoreServiceEndpointExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "endpoint" { + provider = google-beta + service_id = "tf-test-metastore-endpoint%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + hive_metastore_config { + version = "3.1.2" + endpoint_protocol = "GRPC" + } +} +`, context) +} + +func TestAccDataprocMetastoreService_dataprocMetastoreServiceAuxExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreService_dataprocMetastoreServiceAuxExample(context), + }, + { + ResourceName: "google_dataproc_metastore_service.aux", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id", "location"}, + }, + }, + }) +} + +func testAccDataprocMetastoreService_dataprocMetastoreServiceAuxExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "aux" { + provider = google-beta + service_id = "tf-test-metastore-aux%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + hive_metastore_config { + version = "3.1.2" + auxiliary_versions { + key = "aux-test" + version = "2.3.6" + } + } +} +`, context) +} + +func TestAccDataprocMetastoreService_dataprocMetastoreServiceMetadataExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccDataprocMetastoreService_dataprocMetastoreServiceMetadataExample(context), + }, + { + ResourceName: "google_dataproc_metastore_service.metadata", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id", "location"}, + }, + }, + }) +} + +func testAccDataprocMetastoreService_dataprocMetastoreServiceMetadataExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_metastore_service" "metadata" { + provider = google-beta + service_id = "tf-test-metastore-metadata%{random_suffix}" + location = "us-central1" + tier = "DEVELOPER" + + metadata_integration { + data_catalog_config { + enabled = true + } + } + + hive_metastore_config { + version = "3.1.2" + } +} +`, context) +} + func testAccCheckDataprocMetastoreServiceDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go index db36d7badf..4b49861740 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go @@ -383,7 +383,7 @@ func DataprocWorkflowTemplateJobsHiveJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -473,7 +473,7 @@ func DataprocWorkflowTemplateJobsPigJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -562,7 +562,7 @@ func DataprocWorkflowTemplateJobsPrestoJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -900,7 +900,7 @@ func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -2121,12 +2121,12 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa Version: dcl.Int64OrNil(int64(d.Get("version").(int))), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -2143,7 +2143,7 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa } else { client.Config.BasePath = bp } - res, err := client.ApplyWorkflowTemplate(context.Background(), obj, createDirective...) + res, err := client.ApplyWorkflowTemplate(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -4169,7 +4169,6 @@ func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigO } return items } - func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(o interface{}) []dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { objs := o.([]interface{}) items := make([]dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(objs)) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_managed_zone_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_managed_zone_generated_test.go index b261157616..f0c78026b3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_managed_zone_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_managed_zone_generated_test.go @@ -32,7 +32,7 @@ func TestAccDNSManagedZone_dnsManagedZoneQuickstartExample(t *testing.T) { vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), Steps: []resource.TestStep{ { @@ -52,14 +52,13 @@ func testAccDNSManagedZone_dnsManagedZoneQuickstartExample(context map[string]in return Nprintf(` # to setup a web-server resource "google_compute_instance" "default" { - provider = google-beta name = "tf-test-dns-compute-instance%{random_suffix}" machine_type = "g1-small" zone = "us-central1-b" boot_disk { initialize_params { - image = "debian-cloud/debian-9" + image = "debian-cloud/debian-11" } } @@ -78,7 +77,6 @@ resource "google_compute_instance" "default" { # to allow http traffic resource 
"google_compute_firewall" "default" { - provider = google-beta name = "tf-test-allow-http-traffic%{random_suffix}" network = "default" allow { @@ -90,7 +88,6 @@ resource "google_compute_firewall" "default" { # to create a DNS zone resource "google_dns_managed_zone" "default" { - provider = google-beta name = "tf-test-example-zone-googlecloudexample%{random_suffix}" dns_name = "googlecloudexample.com." description = "Example DNS zone" @@ -99,7 +96,6 @@ resource "google_dns_managed_zone" "default" { # to register web-server's ip address in DNS resource "google_dns_record_set" "default" { - provider = google-beta name = google_dns_managed_zone.default.dns_name managed_zone = google_dns_managed_zone.default.name type = "A" @@ -111,6 +107,48 @@ resource "google_dns_record_set" "default" { `, context) } +func TestAccDNSManagedZone_dnsRecordSetBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDNSManagedZone_dnsRecordSetBasicExample(context), + }, + { + ResourceName: "google_dns_managed_zone.parent-zone", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDNSManagedZone_dnsRecordSetBasicExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "tf-test-sample-zone%{random_suffix}" + dns_name = "tf-test-sample-zone%{random_suffix}.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "default" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.tf-test-sample-zone%{random_suffix}.hashicorptest.com." 
+ type = "A" + rrdatas = ["10.0.0.1", "10.1.0.1"] + ttl = 86400 +} +`, context) +} + func TestAccDNSManagedZone_dnsManagedZoneBasicExample(t *testing.T) { skipIfVcr(t) t.Parallel() diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go index b33c2e3b79..6cf1ae6a34 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go @@ -13,7 +13,7 @@ import ( ) func rrdatasDnsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if k == "rrdatas.#" && new == "0" && old != new { + if k == "rrdatas.#" && (new == "0" || new == "") && old != new { return false } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set_test.go index 5b883a3769..d1f1b6c578 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set_test.go @@ -306,6 +306,29 @@ func TestAccDNSRecordSet_changeRouting(t *testing.T) { }) } +// Tracks fix for https://github.com/hashicorp/terraform-provider-google/issues/12043 +func TestAccDNSRecordSet_interpolated(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", randString(t, 10)) + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_interpolated(zoneName), + }, + { + ResourceName: 
"google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/test-record.%s.hashicorptest.com./TXT", zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckDnsRecordSetDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { @@ -491,3 +514,21 @@ resource "google_dns_record_set" "foobar" { } `, zoneName, zoneName, zoneName, ttl, location, addr2) } + +func testAccDnsRecordSet_interpolated(zoneName string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." + type = "TXT" + rrdatas = ["127.0.0.1", "firebase=${google_dns_managed_zone.parent-zone.id}"] + ttl = 10 +} +`, zoneName, zoneName, zoneName) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go index c71d9069a6..89e1862bd3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go @@ -134,7 +134,7 @@ func resourceDocumentAIProcessorCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -152,7 +152,7 @@ func resourceDocumentAIProcessorRead(d *schema.ResourceData, meta interface{}) e return err } - url, err := replaceVars(d, config, 
"{{DocumentAIBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return err } @@ -210,7 +210,7 @@ func resourceDocumentAIProcessorDelete(d *schema.ResourceData, meta interface{}) } billingProject = project - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return err } @@ -235,13 +235,15 @@ func resourceDocumentAIProcessorDelete(d *schema.ResourceData, meta interface{}) func resourceDocumentAIProcessorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ - "(?P.+)", + "projects/(?P[^/]+)/locations/(?P[^/]+)/processors/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -251,7 +253,10 @@ func resourceDocumentAIProcessorImport(d *schema.ResourceData, meta interface{}) } func flattenDocumentAIProcessorName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + if v == nil { + return v + } + return NameFromSelfLinkStateFunc(v) } func flattenDocumentAIProcessorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go index bcf59c8bac..e0ae811ff4 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go @@ -18,6 +18,7 @@ import ( "fmt" "log" "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -46,10 +47,11 @@ func resourceDocumentAIProcessorDefaultVersion() *schema.Resource { Description: `The processor to set the version on.`, }, "version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The version to set`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: projectNumberDiffSuppress, + Description: `The version to set`, }, }, UseJSONNumber: true, @@ -90,6 +92,11 @@ func resourceDocumentAIProcessorDefaultVersionCreate(d *schema.ResourceData, met billingProject = bp } + if strings.Contains(url, "https://-") { + location := GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url + } res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) if err != nil { return fmt.Errorf("Error creating ProcessorDefaultVersion: %s", err) @@ -126,6 +133,11 @@ func resourceDocumentAIProcessorDefaultVersionRead(d *schema.ResourceData, meta billingProject = bp } + if strings.Contains(url, "https://-") { + location := GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url + } res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("DocumentAIProcessorDefaultVersion %q", d.Id())) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_generated_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_generated_test.go index aca7284cc4..8a940c5deb 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_generated_test.go @@ -58,6 +58,41 @@ resource "google_document_ai_processor" "processor" { `, context) } +func TestAccDocumentAIProcessor_documentaiProcessorEuExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDocumentAIProcessorDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDocumentAIProcessor_documentaiProcessorEuExample(context), + }, + { + ResourceName: "google_document_ai_processor.processor", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + }, + }) +} + +func testAccDocumentAIProcessor_documentaiProcessorEuExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_document_ai_processor" "processor" { + location = "eu" + display_name = "tf-test-test-processor%{random_suffix}" + type = "OCR_PROCESSOR" +} +`, context) +} + func testAccCheckDocumentAIProcessorDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { @@ -70,7 +105,7 @@ func testAccCheckDocumentAIProcessorDestroyProducer(t *testing.T) func(s *terraf config := googleProviderConfig(t) - url, err := replaceVarsForTest(config, rs, "{{DocumentAIBasePath}}{{name}}") + url, err := replaceVarsForTest(config, rs, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != 
nil { return err } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_sweeper_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_sweeper_test.go index 8ddf6368d1..6234f5da37 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_sweeper_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_sweeper_test.go @@ -61,7 +61,7 @@ func testSweepDocumentAIProcessor(region string) error { }, } - listTemplate := strings.Split("https://documentai.googleapis.com/v1/{{name}}", "?")[0] + listTemplate := strings.Split("https://{{location}}-documentai.googleapis.com/v1/projects/{{project}}/locations/{{location}}/processors", "?")[0] listUrl, err := replaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -99,7 +99,7 @@ func testSweepDocumentAIProcessor(region string) error { continue } - deleteTemplate := "https://documentai.googleapis.com/v1/{{name}}" + deleteTemplate := "https://{{location}}-documentai.googleapis.com/v1/projects/{{project}}/locations/{{location}}/processors/{{name}}" deleteUrl, err := replaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go index 5ae3025709..78ee4d1af3 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go @@ -311,12 +311,12 @@ func 
resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err Transport: expandEventarcTriggerTransport(d.Get("transport")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -333,7 +333,7 @@ func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err } else { client.Config.BasePath = bp } - res, err := client.ApplyTrigger(context.Background(), obj, createDirective...) + res, err := client.ApplyTrigger(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go index 23cbaf5bcb..b085c66a1f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go @@ -152,9 +152,10 @@ IP addresses assigned. 
Possible values: ["ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4" }, }, "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, Description: `The name of the GCE VPC network to which the instance is connected.`, }, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go index e0c943ca01..5f66cc19ed 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go @@ -49,14 +49,14 @@ func resourceFirebaserulesRelease() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "Format: `projects/{project_id}/releases/{release_id}`", + Description: "Format: `projects/{project_id}/releases/{release_id}`\\Firestore Rules Releases will **always** have the name 'cloud.firestore'", }, "ruleset_name": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist the `Release` to be created.", + Description: "Name of the `Ruleset` referred to by this `Release`. 
The `Ruleset` must exist for the `Release` to be created.", }, "project": { @@ -102,12 +102,12 @@ func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVars(d, config, "projects/{{project}}/releases/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -124,7 +124,7 @@ func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyRelease(context.Background(), obj, createDirective...) + res, err := client.ApplyRelease(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go index 855b9fbe8d..e849f86eb8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go @@ -156,12 +156,12 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -178,7 +178,7 @@ func 
resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyRuleset(context.Background(), obj, createDirective...) + res, err := client.ApplyRuleset(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -191,10 +191,11 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") + // ID has a server-generated value, set again after creation. + + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go index d8c02e4de5..ca75ab9813 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go @@ -221,12 +221,12 @@ func resourceGkeHubFeatureCreate(d *schema.ResourceData, meta interface{}) error mutexKV.Lock(lockName) defer mutexKV.Unlock(lockName) - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/features/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -243,7 +243,7 @@ func 
resourceGkeHubFeatureCreate(d *schema.ResourceData, meta interface{}) error } else { client.Config.BasePath = bp } - res, err := client.ApplyFeature(context.Background(), obj, createDirective...) + res, err := client.ApplyFeature(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go index 25f01d2eea..47a21b504d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go @@ -197,7 +197,7 @@ func GkeHubFeatureMembershipConfigmanagementConfigSyncGitSchema() *schema.Resour "secret_type": { Type: schema.TypeString, Optional: true, - Description: "Type of secret configured for access to the Git repo.", + Description: "Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount or none. The validation of this is case-sensitive.", }, "sync_branch": { @@ -279,6 +279,21 @@ func GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema() *schema.Res Description: "Logs all denies and dry run failures.", }, + "monitoring": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. 
Default: [\"cloudmonitoring\", \"prometheus\"]", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema(), + }, + + "mutation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable or disable mutation in policy controller. If true, mutation CRDs, webhook and controller deployment will be deployed to the cluster.", + }, + "referential_rules_enabled": { Type: schema.TypeBool, Optional: true, @@ -294,6 +309,20 @@ func GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema() *schema.Res } } +func GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backends": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: " Specifies the list of backends Policy Controller will export to. Specifying an empty value `[]` disables metrics export.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) project, err := getProject(d, config) @@ -320,7 +349,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -337,7 +366,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac } else { client.Config.BasePath = bp } - res, err := client.ApplyFeatureMembership(context.Background(), obj, createDirective...) + res, err := client.ApplyFeatureMembership(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -702,6 +731,8 @@ func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{} Enabled: dcl.Bool(obj["enabled"].(bool)), ExemptableNamespaces: expandStringArray(obj["exemptable_namespaces"]), LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), + Monitoring: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj["monitoring"]), + MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), ReferentialRulesEnabled: dcl.Bool(obj["referential_rules_enabled"].(bool)), TemplateLibraryInstalled: dcl.Bool(obj["template_library_installed"].(bool)), } @@ -716,6 +747,8 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub. "enabled": obj.Enabled, "exemptable_namespaces": obj.ExemptableNamespaces, "log_denies_enabled": obj.LogDeniesEnabled, + "monitoring": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj.Monitoring), + "mutation_enabled": obj.MutationEnabled, "referential_rules_enabled": obj.ReferentialRulesEnabled, "template_library_installed": obj.TemplateLibraryInstalled, } @@ -723,3 +756,48 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub. 
return []interface{}{transformed} } + +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o interface{}) *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring{ + Backends: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj["backends"]), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "backends": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj.Backends), + } + + return []interface{}{transformed} + +} +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + objs := o.([]interface{}) + items := make([]gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(objs)) + for _, item := range objs { + i := gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership_test.go index ea13d6b038..b182a5244a 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership_test.go @@ -101,7 +101,7 @@ resource "google_gke_hub_feature_membership" "feature_member_1" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { source_format = "hierarchy" git { @@ -118,7 +118,7 @@ resource "google_gke_hub_feature_membership" "feature_member_2" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_second.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { source_format = "hierarchy" git { @@ -151,7 +151,7 @@ resource "google_gke_hub_feature_membership" "feature_member_1" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { source_format = "hierarchy" git { @@ -168,7 +168,7 @@ resource "google_gke_hub_feature_membership" "feature_member_2" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_second.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { source_format = "hierarchy" git { @@ -207,7 +207,7 @@ resource "google_gke_hub_feature_membership" "feature_member_2" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_second.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { source_format = "unstructured" git { @@ -235,7 +235,7 @@ resource 
"google_gke_hub_feature_membership" "feature_member_3" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_third.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { source_format = "hierarchy" git { @@ -256,6 +256,29 @@ resource "google_gke_hub_feature_membership" "feature_member_3" { } provider = google-beta } + +resource "google_gke_hub_feature_membership" "feature_member_4" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_fourth.membership_id + configmanagement { + version = "1.12.0" + policy_controller { + enabled = true + audit_interval_seconds = "100" + template_library_installed = true + mutation_enabled = true + monitoring { + backends = ["CLOUD_MONITORING", "PROMETHEUS"] + } + } + } + provider = google-beta +} + + + `, context) } @@ -279,7 +302,7 @@ resource "google_gke_hub_feature_membership" "feature_member_3" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_third.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" policy_controller { enabled = true audit_interval_seconds = "100" @@ -397,7 +420,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { git { sync_repo = "https://github.com/hashicorp/terraform" @@ -463,7 +486,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.10.1" + version = "1.12.0" config_sync { git { sync_repo = "https://github.com/hashicorp/terraform" @@ -536,7 +559,7 @@ resource "google_gke_hub_feature_membership" 
"feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.9.0" + version = "1.12.0" config_sync { git { sync_repo = "https://github.com/hashicorp/terraform" @@ -577,6 +600,16 @@ resource "google_container_cluster" "tertiary" { depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] } + +resource "google_container_cluster" "quarternary" { + name = "tf-test-cl4%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + provider = google-beta + depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + resource "google_gke_hub_membership" "membership" { project = google_project.project.project_id membership_id = "tf-test1%{random_suffix}" @@ -612,6 +645,18 @@ resource "google_gke_hub_membership" "membership_third" { description = "test resource." provider = google-beta } + +resource "google_gke_hub_membership" "membership_fourth" { + project = google_project.project.project_id + membership_id = "tf-test4%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.quarternary.id}" + } + } + description = "test resource." 
+ provider = google-beta +} `, context) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go index 3ba8c7e618..8af1158eec 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go @@ -70,6 +70,7 @@ https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, "issuer": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, }, @@ -195,7 +196,7 @@ func resourceGKEHubMembershipCreate(d *schema.ResourceData, meta interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -218,7 +219,7 @@ func resourceGKEHubMembershipCreate(d *schema.ResourceData, meta interface{}) er } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{name}}") + id, err = replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -236,7 +237,7 @@ func resourceGKEHubMembershipRead(d *schema.ResourceData, meta interface{}) erro return err } - url, err := replaceVars(d, config, "{{GKEHubBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return err } @@ -382,7 +383,7 @@ func resourceGKEHubMembershipDelete(d *schema.ResourceData, meta interface{}) er } billingProject = project - url, err := replaceVars(d, config, "{{GKEHubBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return err } @@ -415,13 +416,15 @@ func resourceGKEHubMembershipDelete(d *schema.ResourceData, meta interface{}) er func resourceGKEHubMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ - "(?P.+)", + "projects/(?P[^/]+)/locations/global/memberships/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership_generated_test.go index 208211f47d..a3a227d1e1 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership_generated_test.go @@ -130,7 +130,7 @@ func testAccCheckGKEHubMembershipDestroyProducer(t *testing.T) func(s *terraform config := googleProviderConfig(t) - url, err := replaceVarsForTest(config, rs, "{{GKEHubBasePath}}{{name}}") + url, err := replaceVarsForTest(config, rs, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return err } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_google_service_usage_consumer_quota_override_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_google_service_usage_consumer_quota_override_test.go new file mode 100644 index 0000000000..872d3c278c --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_google_service_usage_consumer_quota_override_test.go @@ -0,0 +1,50 @@ +package google + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccServiceUsageConsumerQuotaOverride_consumerQuotaOverrideCustomIncorrectLimitFormat(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": getTestOrgFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProvidersOiCS, + CheckDestroy: testAccCheckServiceUsageConsumerQuotaOverrideDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceUsageConsumerQuotaOverride_consumerQuotaOverrideCustomIncorrectLimitFormat(context), + ExpectError: regexp.MustCompile("No quota limit with limitId"), + }, + }, + }) +} + +func 
testAccServiceUsageConsumerQuotaOverride_consumerQuotaOverrideCustomIncorrectLimitFormat(context map[string]interface{}) string { + return Nprintf(` +resource "google_project" "my_project" { + provider = google-beta + name = "tf-test-project" + project_id = "quota%{random_suffix}" + org_id = "%{org_id}" +} + +resource "google_service_usage_consumer_quota_override" "override" { + provider = google-beta + project = google_project.my_project.project_id + service = urlencode("bigquery.googleapis.com") + metric = urlencode("bigquery.googleapis.com/quota/query/usage") + limit = urlencode("1/d/{project}/{user}") # Incorrect format for the API the provider uses, correct format for the gcloud CLI + override_value = "1" + force = true +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_generated_test.go index 123ecb169a..f1cbe235f1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_generated_test.go @@ -32,7 +32,7 @@ func TestAccIAMBetaWorkloadIdentityPool_iamWorkloadIdentityPoolBasicExample(t *t vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolDestroyProducer(t), Steps: []resource.TestStep{ { @@ -51,7 +51,6 @@ func TestAccIAMBetaWorkloadIdentityPool_iamWorkloadIdentityPoolBasicExample(t *t func testAccIAMBetaWorkloadIdentityPool_iamWorkloadIdentityPoolBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_iam_workload_identity_pool" "example" { - provider = google-beta 
workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" } `, context) @@ -66,7 +65,7 @@ func TestAccIAMBetaWorkloadIdentityPool_iamWorkloadIdentityPoolFullExample(t *te vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolDestroyProducer(t), Steps: []resource.TestStep{ { @@ -85,7 +84,6 @@ func TestAccIAMBetaWorkloadIdentityPool_iamWorkloadIdentityPoolFullExample(t *te func testAccIAMBetaWorkloadIdentityPool_iamWorkloadIdentityPoolFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_iam_workload_identity_pool" "example" { - provider = google-beta workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" display_name = "Name of pool" description = "Identity pool for automated test" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_provider_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_provider_generated_test.go index d5477c8d77..38885f76d1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_provider_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_iam_workload_identity_pool_provider_generated_test.go @@ -32,7 +32,7 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderA vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), Steps: []resource.TestStep{ { @@ -51,12 +51,10 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderA func 
testAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderAwsBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_iam_workload_identity_pool" "pool" { - provider = google-beta workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" } resource "google_iam_workload_identity_pool_provider" "example" { - provider = google-beta workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" aws { @@ -75,7 +73,7 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderA vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), Steps: []resource.TestStep{ { @@ -94,12 +92,10 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderA func testAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderAwsFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_iam_workload_identity_pool" "pool" { - provider = google-beta workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" } resource "google_iam_workload_identity_pool_provider" "example" { - provider = google-beta workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" display_name = "Name of provider" @@ -127,7 +123,7 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderO vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), Steps: []resource.TestStep{ { @@ -146,12 +142,10 @@ func 
TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderO func testAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderOidcBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_iam_workload_identity_pool" "pool" { - provider = google-beta workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" } resource "google_iam_workload_identity_pool_provider" "example" { - provider = google-beta workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" attribute_mapping = { @@ -173,7 +167,7 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderO vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), Steps: []resource.TestStep{ { @@ -192,12 +186,10 @@ func TestAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderO func testAccIAMBetaWorkloadIdentityPoolProvider_iamWorkloadIdentityPoolProviderOidcFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_iam_workload_identity_pool" "pool" { - provider = google-beta workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" } resource "google_iam_workload_identity_pool_provider" "example" { - provider = google-beta workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" display_name = "Name of provider" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go index 4421d49882..77187e6fcf 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go @@ -92,10 +92,10 @@ If not specified at creation time, the default duration is 24 hours.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", ""}), + ValidateFunc: validateEnum([]string{"ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC", ""}), Description: `The immutable purpose of this CryptoKey. See the [purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) -for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"]`, +for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC"]`, Default: "ENCRYPT_DECRYPT", }, "rotation_period": { @@ -132,7 +132,7 @@ See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL". Defaults to "SOFTWARE".`, + Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". 
Defaults to "SOFTWARE".`, Default: "SOFTWARE", }, }, @@ -430,6 +430,12 @@ func resourceKMSCryptoKeyImport(d *schema.ResourceData, meta interface{}) ([]*sc return nil, fmt.Errorf("Error setting skip_initial_version_creation: %s", err) } + id, err := replaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return []*schema.ResourceData{d}, nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key_test.go index 0657d2563e..fc3fc28c00 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key_test.go @@ -187,6 +187,13 @@ func TestAccKmsCryptoKey_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + // Test importing with a short id + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateId: fmt.Sprintf("%s/%s/%s/%s", projectId, location, keyRingName, cryptoKeyName), + ImportStateVerify: true, + }, // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
{ Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_key_ring_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_key_ring_test.go index e8728d8d66..8537b2a260 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_key_ring_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_key_ring_test.go @@ -99,10 +99,8 @@ func TestAccKmsKeyRing_basic(t *testing.T) { }) } -/* - KMS KeyRings cannot be deleted. This ensures that the KeyRing resource was removed from state, - even though the server-side resource was not removed. -*/ +// KMS KeyRings cannot be deleted. This ensures that the KeyRing resource was removed from state, +// even though the server-side resource was not removed. func testAccCheckGoogleKmsKeyRingWasRemovedFromState(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { _, ok := s.RootModule().Resources[resourceName] @@ -115,10 +113,8 @@ func testAccCheckGoogleKmsKeyRingWasRemovedFromState(resourceName string) resour } } -/* - This test runs in its own project, otherwise the test project would start to get filled - with undeletable resources -*/ +// This test runs in its own project, otherwise the test project would start to get filled +// with undeletable resources func testGoogleKmsKeyRing_basic(projectId, projectOrg, projectBillingAccount, keyRingName string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go index f3685dbc43..da9a118387 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go @@ -121,7 +121,7 @@ func resourceLoggingLogViewCreate(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -138,7 +138,7 @@ func resourceLoggingLogViewCreate(d *schema.ResourceData, meta interface{}) erro } else { client.Config.BasePath = bp } - res, err := client.ApplyLogView(context.Background(), obj, createDirective...) + res, err := client.ApplyLogView(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go index de7e0a2f51..1b91980d94 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go index 8c0a8d4eff..e7540f37b8 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go @@ -302,6 +302,14 @@ alerted on quickly.`, Required: true, Description: `Monitoring Query Language query that outputs a boolean stream.`, }, + "evaluation_missing_data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), + Description: `A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, + }, "trigger": { Type: schema.TypeList, Optional: true, @@ -627,6 +635,14 @@ contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length.`, + }, + "evaluation_missing_data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), + Description: `A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. 
Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, }, "filter": { Type: schema.TypeString, @@ -1395,6 +1411,8 @@ func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v in flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) transformed["trigger"] = flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) + transformed["evaluation_missing_data"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluationMissingData"], d, config) return []interface{}{transformed} } func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1441,6 +1459,10 @@ func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigg return v // let terraform core handle it otherwise } +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -1466,6 +1488,8 @@ func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d * flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) transformed["filter"] = flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) + transformed["evaluation_missing_data"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluationMissingData"], d, config) return []interface{}{transformed} } func flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1598,6 +1622,10 @@ func flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{ return v } +func flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenMonitoringAlertPolicyConditionsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1936,6 +1964,13 @@ func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v int transformed["trigger"] = transformedTrigger } + transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluation_missing_data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !isEmptyValue(val) { + transformed["evaluationMissingData"] = transformedEvaluationMissingData + } + return transformed, nil } @@ -1981,6 +2016,10 @@ func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigge return v, nil } +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -2046,6 +2085,13 @@ func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d Te transformed["filter"] = transformedFilter } + transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluation_missing_data"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !isEmptyValue(val) { + transformed["evaluationMissingData"] = transformedEvaluationMissingData + } + return transformed, nil } @@ -2221,6 +2267,10 @@ func expandMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{} return v, nil } +func expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandMonitoringAlertPolicyConditionsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go index 7a271baf46..2197c9a31e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go @@ -74,12 +74,12 @@ func resourceMonitoringMonitoredProjectCreate(d *schema.ResourceData, meta inter Name: dcl.String(d.Get("name").(string)), } - id, err := replaceVarsForId(d, config, "locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -97,7 +97,7 @@ func resourceMonitoringMonitoredProjectCreate(d *schema.ResourceData, meta inter } else { client.Config.BasePath = bp } - res, err := client.ApplyMonitoredProject(context.Background(), obj, createDirective...) 
+ res, err := client.ApplyMonitoredProject(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project_generated_test.go index 2fb7a1919b..702feafa38 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project_generated_test.go @@ -36,9 +36,8 @@ func TestAccMonitoringMonitoredProject_BasicMonitoredProject(t *testing.T) { } vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - - Providers: testAccProvidersOiCS, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, CheckDestroy: testAccCheckMonitoringMonitoredProjectDestroyProducer(t), Steps: []resource.TestStep{ { @@ -58,15 +57,15 @@ func testAccMonitoringMonitoredProject_BasicMonitoredProject(context map[string] resource "google_monitoring_monitored_project" "primary" { metrics_scope = "%{project_name}" name = google_project.basic.name - provider = google-beta } + resource "google_project" "basic" { project_id = "tf-test-id%{random_suffix}" name = "tf-test-id%{random_suffix}" org_id = "%{org_id}" - provider = google-beta } + `, context) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go index 19e3ad7c9c..91327d169b 100644 --- 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go @@ -114,6 +114,26 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "accepted_response_status_codes": { + Type: schema.TypeList, + Optional: true, + Description: `If present, the check will only pass if the HTTP response status code is in this set of status codes. If empty, the HTTP status code will only pass if the HTTP status code is 200-299.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY", ""}), + Description: `A class of status codes to accept. 
Possible values: ["STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY"]`, + }, + "status_value": { + Type: schema.TypeInt, + Optional: true, + Description: `A status code to accept.`, + }, + }, + }, + }, "auth_info": { Type: schema.TypeList, Optional: true, @@ -790,6 +810,8 @@ func flattenMonitoringUptimeCheckConfigHttpCheck(v interface{}, d *schema.Resour flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["maskHeaders"], d, config) transformed["body"] = flattenMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) + transformed["accepted_response_status_codes"] = + flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["acceptedResponseStatusCodes"], d, config) return []interface{}{transformed} } func flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -864,6 +886,46 @@ func flattenMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d *schema.Re return v } +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "status_value": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["statusValue"], d, config), + "status_class": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["statusClass"], d, config), + }) + } + return transformed +} +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenMonitoringUptimeCheckConfigTcpCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -1118,6 +1180,13 @@ func expandMonitoringUptimeCheckConfigHttpCheck(v interface{}, d TerraformResour transformed["body"] = transformedBody } + transformedAcceptedResponseStatusCodes, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["accepted_response_status_codes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceptedResponseStatusCodes); val.IsValid() && !isEmptyValue(val) { + transformed["acceptedResponseStatusCodes"] = transformedAcceptedResponseStatusCodes + } + return transformed, nil } @@ -1198,6 +1267,43 @@ func expandMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d TerraformRe return v, nil } +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStatusValue, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["status_value"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedStatusValue); val.IsValid() && !isEmptyValue(val) { + transformed["statusValue"] = transformedStatusValue + } + + transformedStatusClass, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["status_class"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStatusClass); val.IsValid() && !isEmptyValue(val) { + transformed["statusClass"] = transformedStatusClass + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandMonitoringUptimeCheckConfigTcpCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config_generated_test.go index 30d10b01b9..cc1b47ca47 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config_generated_test.go @@ -84,6 +84,77 @@ resource "google_monitoring_uptime_check_config" "http" { `, context) } +func TestAccMonitoringUptimeCheckConfig_uptimeCheckConfigStatusCodeExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": getTestProjectFromEnv(), + "random_suffix": 
randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMonitoringUptimeCheckConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccMonitoringUptimeCheckConfig_uptimeCheckConfigStatusCodeExample(context), + }, + { + ResourceName: "google_monitoring_uptime_check_config.status_code", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccMonitoringUptimeCheckConfig_uptimeCheckConfigStatusCodeExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_monitoring_uptime_check_config" "status_code" { + display_name = "tf-test-http-uptime-check%{random_suffix}" + timeout = "60s" + + http_check { + path = "some-path" + port = "8010" + request_method = "POST" + content_type = "URL_ENCODED" + body = "Zm9vJTI1M0RiYXI=" + + accepted_response_status_codes { + status_class = "STATUS_CLASS_2XX" + } + accepted_response_status_codes { + status_value = 301 + } + accepted_response_status_codes { + status_value = 302 + } + } + + monitored_resource { + type = "uptime_url" + labels = { + project_id = "%{project_id}" + host = "192.168.1.1" + } + } + + content_matchers { + content = "\"example\"" + matcher = "MATCHES_JSON_PATH" + json_path_matcher { + json_path = "$.path" + json_matcher = "EXACT_MATCH" + } + } + + checker_type = "STATIC_IP_CHECKERS" +} +`, context) +} + func TestAccMonitoringUptimeCheckConfig_uptimeCheckConfigHttpsExample(t *testing.T) { t.Parallel() diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go index 8f26b0e59c..62d2c02bde 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go @@ -134,12 +134,12 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -156,7 +156,7 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface } else { client.Config.BasePath = bp } - res, err := client.ApplyHub(context.Background(), obj, createDirective...) + res, err := client.ApplyHub(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go index 83de162588..fff5485a32 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go @@ -249,12 +249,12 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, 
config.userAgent) if err != nil { return err @@ -271,7 +271,7 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa } else { client.Config.BasePath = bp } - res, err := client.ApplySpoke(context.Background(), obj, createDirective...) + res, err := client.ApplySpoke(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_generated_test.go index 58957c9a3c..d2f2ac27e1 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_generated_test.go @@ -101,7 +101,7 @@ resource "google_compute_network" "vpc" { } data "google_compute_image" "debian_9" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_test.go index 9cd823edfe..4105ae9c8c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_management_connectivity_test_resource_test.go @@ -132,7 +132,7 @@ resource "google_compute_subnetwork" "subnet" { } data 
"google_compute_image" "debian_9" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } `, context) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go index 4f24e3965e..5498db3c0f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go @@ -117,8 +117,8 @@ If not specified, this defaults to 100.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", ""}), - Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED"]`, + ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), + Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, }, "container_image": { Type: schema.TypeList, @@ -165,9 +165,9 @@ If not specified, this defaults to 100.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", ""}), + ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), DiffSuppressFunc: emptyOrDefaultStringSuppress("DISK_TYPE_UNSPECIFIED"), - Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED"]`, + Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, }, "disk_encryption": { Type: schema.TypeString, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go index e5fb4651ae..43115bc4f2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go @@ -75,7 +75,7 @@ func resourceNotebooksRuntime() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: `The name specified for the Notebook instance.`, + Description: `The name specified for the Notebook runtime.`, }, "access_config": { Type: schema.TypeList, @@ -143,6 +143,26 @@ Default: 180 minutes`, Optional: true, Description: `Install Nvidia Driver automatically.`, }, + "kernels": { + Type: schema.TypeList, + Optional: true, + Description: `Use a list of container images to use as Kernels in the notebook instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository": { + Type: schema.TypeString, + Required: true, + Description: `The path to the container image repository. +For example: gcr.io/{project_id}/{imageName}`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `The tag of the container image. If not specified, this defaults to the latest tag.`, + }, + }, + }, + }, "notebook_upgrade_schedule": { Type: schema.TypeString, Optional: true, @@ -156,6 +176,17 @@ Please follow the [cron format](https://en.wikipedia.org/wiki/Cron).`, fully boots up. 
The path must be a URL or Cloud Storage path (gs://path-to-file/file-name).`, }, + "post_startup_script_behavior": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START", ""}), + Description: `Behavior for the post startup script. Possible values: ["POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START"]`, + }, + "upgradeable": { + Type: schema.TypeBool, + Computed: true, + Description: `Bool indicating whether a newer image is available in an image family.`, + }, }, }, }, @@ -357,6 +388,7 @@ rest/v1/projects.locations.runtimes#AcceleratorType'`, Type: schema.TypeList, Computed: true, Optional: true, + ForceNew: true, Description: `Use a list of container images to start the notebook instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -377,6 +409,7 @@ For example: gcr.io/{project_id}/{imageName}`, "encryption_config": { Type: schema.TypeList, Optional: true, + ForceNew: true, Description: `Encryption settings for virtual machine data disk.`, MaxItems: 1, Elem: &schema.Resource{ @@ -396,6 +429,7 @@ It has the following format: "internal_ip_only": { Type: schema.TypeBool, Optional: true, + ForceNew: true, Description: `If true, runtime will only have internal IP addresses. By default, runtimes are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each vm. This @@ -429,6 +463,7 @@ _metadata)).`, "network": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork. 
If neither 'network' nor 'subnet' is specified, the "default" network of the project is @@ -447,13 +482,22 @@ Runtimes support the following network configurations: "nic_type": { Type: schema.TypeString, Optional: true, + ForceNew: true, ValidateFunc: validateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), Description: `The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. Possible values: ["UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC"]`, + }, + "reserved_ip_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Reserved IP Range name is used for VPC Peering. The +subnetwork allocation will use the range *name* if it's assigned.`, }, "shielded_instance_config": { Type: schema.TypeList, Optional: true, + ForceNew: true, Description: `Shielded VM Instance configuration settings.`, MaxItems: 1, Elem: &schema.Resource{ @@ -489,6 +533,7 @@ default.`, "subnet": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network. A full URL or partial URI are valid. 
Examples: @@ -756,13 +801,35 @@ func resourceNotebooksRuntimeUpdate(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] Updating Runtime %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("virtual_machine") { + updateMask = append(updateMask, "virtualMachine") + } + + if d.HasChange("access_config") { + updateMask = append(updateMask, "accessConfig") + } + + if d.HasChange("software_config") { + updateMask = append(updateMask, "softwareConfig.idleShutdown", + "softwareConfig.idleShutdownTimeout", + "softwareConfig.customGpuDriverPath", + "softwareConfig.postStartupScript") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) if err != nil { return fmt.Errorf("Error updating Runtime %q: %s", d.Id(), err) @@ -910,6 +977,8 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(original["labels"], d, config) transformed["nic_type"] = flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(original["nicType"], d, config) + transformed["reserved_ip_range"] = + flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(original["reservedIpRange"], d, config) return []interface{}{transformed} } func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1207,6 +1276,10 @@ func 
flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interfac return v } +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenNotebooksRuntimeState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1263,10 +1336,16 @@ func flattenNotebooksRuntimeSoftwareConfig(v interface{}, d *schema.ResourceData flattenNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(original["idleShutdownTimeout"], d, config) transformed["install_gpu_driver"] = flattenNotebooksRuntimeSoftwareConfigInstallGpuDriver(original["installGpuDriver"], d, config) + transformed["upgradeable"] = + flattenNotebooksRuntimeSoftwareConfigUpgradeable(original["upgradeable"], d, config) transformed["custom_gpu_driver_path"] = flattenNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(original["customGpuDriverPath"], d, config) transformed["post_startup_script"] = flattenNotebooksRuntimeSoftwareConfigPostStartupScript(original["postStartupScript"], d, config) + transformed["post_startup_script_behavior"] = + flattenNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(original["postStartupScriptBehavior"], d, config) + transformed["kernels"] = + flattenNotebooksRuntimeSoftwareConfigKernels(original["kernels"], d, config) return []interface{}{transformed} } func flattenNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1302,6 +1381,10 @@ func flattenNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d *sch return v } +func flattenNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1310,6 +1393,37 @@ func 
flattenNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d *sc return v } +func flattenNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenNotebooksRuntimeSoftwareConfigKernels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "repository": flattenNotebooksRuntimeSoftwareConfigKernelsRepository(original["repository"], d, config), + "tag": flattenNotebooksRuntimeSoftwareConfigKernelsTag(original["tag"], d, config), + }) + } + return transformed +} +func flattenNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenNotebooksRuntimeMetrics(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -1482,6 +1596,13 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d T transformed["nicType"] = transformedNicType } + transformedReservedIpRange, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(original["reserved_ip_range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !isEmptyValue(val) { + transformed["reservedIpRange"] = transformedReservedIpRange + } + return transformed, nil } @@ -1899,6 +2020,10 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interface return v, nil } +func 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandNotebooksRuntimeAccessConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -1988,6 +2113,13 @@ func expandNotebooksRuntimeSoftwareConfig(v interface{}, d TerraformResourceData transformed["installGpuDriver"] = transformedInstallGpuDriver } + transformedUpgradeable, err := expandNotebooksRuntimeSoftwareConfigUpgradeable(original["upgradeable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpgradeable); val.IsValid() && !isEmptyValue(val) { + transformed["upgradeable"] = transformedUpgradeable + } + transformedCustomGpuDriverPath, err := expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(original["custom_gpu_driver_path"], d, config) if err != nil { return nil, err @@ -2002,6 +2134,20 @@ func expandNotebooksRuntimeSoftwareConfig(v interface{}, d TerraformResourceData transformed["postStartupScript"] = transformedPostStartupScript } + transformedPostStartupScriptBehavior, err := expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(original["post_startup_script_behavior"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostStartupScriptBehavior); val.IsValid() && !isEmptyValue(val) { + transformed["postStartupScriptBehavior"] = transformedPostStartupScriptBehavior + } + + transformedKernels, err := expandNotebooksRuntimeSoftwareConfigKernels(original["kernels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKernels); val.IsValid() && !isEmptyValue(val) { + transformed["kernels"] = transformedKernels + } + return transformed, nil } @@ -2025,6 +2171,10 @@ func expandNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d Terra return v, nil } 
+func expandNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -2032,3 +2182,44 @@ func expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d Te func expandNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } + +func expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksRuntimeSoftwareConfigKernels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepository, err := expandNotebooksRuntimeSoftwareConfigKernelsRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedTag, err := expandNotebooksRuntimeSoftwareConfigKernelsTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + transformed["tag"] = transformedTag + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { + return v, nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_generated_test.go index 7878ff30da..8c114fd063 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_generated_test.go @@ -185,6 +185,113 @@ resource "google_notebooks_runtime" "runtime_container" { `, context) } +func TestAccNotebooksRuntime_notebookRuntimeKernelsExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNotebooksRuntimeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksRuntime_notebookRuntimeKernelsExample(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime_container", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccNotebooksRuntime_notebookRuntimeKernelsExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_notebooks_runtime" "runtime_container" { + name = "tf-test-notebooks-runtime-kernel%{random_suffix}" + location = "us-central1" + access_config { + access_type = "SINGLE_USER" + runtime_owner = "admin@hashicorptest.com" + } + software_config { + kernels { + repository = "gcr.io/deeplearning-platform-release/base-cpu" + tag = "latest" + } + } + virtual_machine { + virtual_machine_config { + machine_type = "n1-standard-4" + data_disk { + initialize_params { + disk_size_gb = "100" + disk_type = "PD_STANDARD" + } + } + } + } +} 
+`, context) +} + +func TestAccNotebooksRuntime_notebookRuntimeScriptExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNotebooksRuntimeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksRuntime_notebookRuntimeScriptExample(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime_container", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccNotebooksRuntime_notebookRuntimeScriptExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_notebooks_runtime" "runtime_container" { + name = "tf-test-notebooks-runtime-script%{random_suffix}" + location = "us-central1" + access_config { + access_type = "SINGLE_USER" + runtime_owner = "admin@hashicorptest.com" + } + software_config { + post_startup_script_behavior = "RUN_EVERY_START" + } + virtual_machine { + virtual_machine_config { + machine_type = "n1-standard-4" + data_disk { + initialize_params { + disk_size_gb = "100" + disk_type = "PD_STANDARD" + } + } + } + } +} +`, context) +} + func testAccCheckNotebooksRuntimeDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_test.go new file mode 100644 index 0000000000..d4d90657b3 --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime_test.go @@ -0,0 +1,99 @@ +package google + +import ( + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNotebooksRuntime_update(t *testing.T) { + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNotebooksRuntimeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksRuntime_basic(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNotebooksRuntime_update(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNotebooksRuntime_basic(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNotebooksRuntime_basic(context map[string]interface{}) string { + return Nprintf(` +resource "google_notebooks_runtime" "runtime" { + name = "tf-test-notebooks-runtime%{random_suffix}" + location = "us-central1" + access_config { + access_type = "SINGLE_USER" + runtime_owner = "admin@hashicorptest.com" + } + software_config {} + virtual_machine { + virtual_machine_config { + machine_type = "n1-standard-4" + data_disk { + initialize_params { + disk_size_gb = "100" + disk_type = "PD_STANDARD" + } + } + reserved_ip_range = "192.168.255.0/24" + } + } +} +`, context) +} + +func testAccNotebooksRuntime_update(context map[string]interface{}) string { + return Nprintf(` +resource "google_notebooks_runtime" "runtime" { + name = "tf-test-notebooks-runtime%{random_suffix}" + location = "us-central1" + access_config { + access_type = "SINGLE_USER" + runtime_owner = "admin@hashicorptest.com" + } + software_config { + idle_shutdown_timeout = "80" + } + virtual_machine { + virtual_machine_config { + machine_type = "n1-standard-4" + data_disk { + initialize_params 
{ + disk_size_gb = "100" + disk_type = "PD_STANDARD" + } + } + reserved_ip_range = "192.168.255.0/24" + } + } +} +`, context) +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go index b6c6b722f9..7ce8dbfcaa 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go @@ -212,7 +212,7 @@ func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -229,7 +229,7 @@ func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) err } else { client.Config.BasePath = bp } - res, err := client.ApplyPolicy(context.Background(), obj, createDirective...) + res, err := client.ApplyPolicy(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_guest_policies_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_guest_policies_generated_test.go index 14d07c9d0a..7561f4e84f 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_guest_policies_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_guest_policies_generated_test.go @@ -52,7 +52,7 @@ func testAccOSConfigGuestPolicies_osConfigGuestPoliciesBasicExample(context map[ return Nprintf(` data "google_compute_image" "my_image" { provider = google-beta - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go index cc653d6e21..760c02191c 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go @@ -1326,12 +1326,12 @@ func resourceOsConfigOsPolicyAssignmentCreate(d *schema.ResourceData, meta inter Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err 
@@ -1349,7 +1349,7 @@ func resourceOsConfigOsPolicyAssignmentCreate(d *schema.ResourceData, meta inter } else { client.Config.BasePath = bp } - res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, createDirective...) + res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_patch_deployment_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_patch_deployment_generated_test.go index 3a4fda747a..e9a8f8ea72 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_patch_deployment_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_patch_deployment_generated_test.go @@ -192,7 +192,7 @@ func TestAccOSConfigPatchDeployment_osConfigPatchDeploymentInstanceExample(t *te func testAccOSConfigPatchDeployment_osConfigPatchDeploymentInstanceExample(context map[string]interface{}) string { return Nprintf(` data "google_compute_image" "my_image" { - family = "debian-9" + family = "debian-11" project = "debian-cloud" } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go index 13f391704b..6155a88bdc 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go @@ -73,10 +73,13 @@ omitted, no template will be used. 
This template must be in the same location as the Certificate.`, }, "certificate_authority": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Certificate Authority name.`, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Certificate Authority ID that should issue the certificate. For example, to issue a Certificate from +a Certificate Authority with resource name 'projects/my-project/locations/us-central1/caPools/my-pool/certificateAuthorities/my-ca', +argument 'pool' should be set to 'projects/my-project/locations/us-central1/caPools/my-pool', argument 'certificate_authority' +should be set to 'my-ca'.`, }, "config": { Type: schema.TypeList, @@ -1129,7 +1132,7 @@ This is in RFC3339 text format.`, "issuer_certificate_authority": { Type: schema.TypeString, Computed: true, - Description: `The resource name of the issuing CertificateAuthority in the format projects/*/locations/*/caPools/*/certificateAuthorities/*.`, + Description: `The resource name of the issuing CertificateAuthority in the format 'projects/*/locations/*/caPools/*/certificateAuthorities/*'.`, }, "pem_certificate": { Type: schema.TypeString, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go index c4cd85e718..1cbef84c57 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go @@ -550,6 +550,52 @@ An object containing a list of "key": value pairs. Example: { "name": "wrench", fractional digits, terminated by 's'. 
Example: "3.5s".`, Default: "315360000s", }, + "pem_ca_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `The signed CA certificate issued from the subordinated CA's CSR. This is needed when activating the subordiante CA with a third party issuer.`, + }, + "subordinate_config": { + Type: schema.TypeList, + Optional: true, + Description: `If this is a subordinate CertificateAuthority, this field will be set +with the subordinate configuration, which describes its issuers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareResourceNames, + Description: `This can refer to a CertificateAuthority that was used to create a +subordinate CertificateAuthority. This field is used for information +and usability purposes only. The resource name is in the format +'projects/*/locations/*/caPools/*/certificateAuthorities/*'.`, + ExactlyOneOf: []string{"subordinate_config.0.certificate_authority", "subordinate_config.0.pem_issuer_chain"}, + }, + "pem_issuer_chain": { + Type: schema.TypeList, + Optional: true, + Description: `Contains the PEM certificate chain for the issuers of this CertificateAuthority, +but not pem certificate for this CA itself.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pem_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `Expected to be in leaf-to-root order according to RFC 5246.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"subordinate_config.0.certificate_authority", "subordinate_config.0.pem_issuer_chain"}, + }, + }, + }, + }, "type": { Type: schema.TypeString, Optional: true, @@ -558,8 +604,7 @@ fractional digits, terminated by 's'. Example: "3.5s".`, Description: `The Type of this CertificateAuthority. 
~> **Note:** For 'SUBORDINATE' Certificate Authorities, they need to -be manually activated (via Cloud Console of 'gcloud') before they can -issue certificates. Default value: "SELF_SIGNED" Possible values: ["SELF_SIGNED", "SUBORDINATE"]`, +be activated before they can issue certificates. Default value: "SELF_SIGNED" Possible values: ["SELF_SIGNED", "SUBORDINATE"]`, Default: "SELF_SIGNED", }, "access_urls": { @@ -676,6 +721,12 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in } else if v, ok := d.GetOkExists("key_spec"); !isEmptyValue(reflect.ValueOf(keySpecProp)) && (ok || !reflect.DeepEqual(v, keySpecProp)) { obj["keySpec"] = keySpecProp } + subordinateConfigProp, err := expandPrivatecaCertificateAuthoritySubordinateConfig(d.Get("subordinate_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subordinate_config"); !isEmptyValue(reflect.ValueOf(subordinateConfigProp)) && (ok || !reflect.DeepEqual(v, subordinateConfigProp)) { + obj["subordinateConfig"] = subordinateConfigProp + } gcsBucketProp, err := expandPrivatecaCertificateAuthorityGcsBucket(d.Get("gcs_bucket"), d, config) if err != nil { return err @@ -708,6 +759,9 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in billingProject = bp } + // Drop `subordinateConfig` as it can not be set during CA creation. + // It can be used to activate CA during post_create or pre_update. 
+ delete(obj, "subordinateConfig") res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) if err != nil { return fmt.Errorf("Error creating CertificateAuthority: %s", err) @@ -753,27 +807,24 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in staged := d.Get("type").(string) == "SELF_SIGNED" + if d.Get("type").(string) == "SUBORDINATE" { + if _, ok := d.GetOk("subordinate_config"); ok { + // First party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with first party issuer") + if err := activateSubCAWithFirstPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with first party issuer: %v", err) + } + staged = true + log.Printf("[DEBUG] CertificateAuthority activated") + } + } + // Enable the CA if `desired_state` is unspecified or specified as `ENABLED`. if p, ok := d.GetOk("desired_state"); !ok || p.(string) == "ENABLED" { // Skip enablement on SUBORDINATE CA for backward compatible. 
if staged { - url, err = replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Enabling CertificateAuthority: %#v", obj) - - res, err = sendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return fmt.Errorf("Error enabling CertificateAuthority: %s", err) - } - - err = privatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err) + if err := enableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %v", err) } } } @@ -850,6 +901,9 @@ func resourcePrivatecaCertificateAuthorityRead(d *schema.ResourceData, meta inte if err := d.Set("key_spec", flattenPrivatecaCertificateAuthorityKeySpec(res["keySpec"], d, config)); err != nil { return fmt.Errorf("Error reading CertificateAuthority: %s", err) } + if err := d.Set("subordinate_config", flattenPrivatecaCertificateAuthoritySubordinateConfig(res["subordinateConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateAuthority: %s", err) + } if err := d.Set("state", flattenPrivatecaCertificateAuthorityState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading CertificateAuthority: %s", err) } @@ -891,6 +945,12 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in billingProject = project obj := make(map[string]interface{}) + subordinateConfigProp, err := expandPrivatecaCertificateAuthoritySubordinateConfig(d.Get("subordinate_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subordinate_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
subordinateConfigProp)) { + obj["subordinateConfig"] = subordinateConfigProp + } labelsProp, err := expandPrivatecaCertificateAuthorityLabels(d.Get("labels"), d, config) if err != nil { return err @@ -906,6 +966,10 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in log.Printf("[DEBUG] Updating CertificateAuthority %q: %#v", d.Id(), obj) updateMask := []string{} + if d.HasChange("subordinate_config") { + updateMask = append(updateMask, "subordinateConfig") + } + if d.HasChange("labels") { updateMask = append(updateMask, "labels") } @@ -915,50 +979,43 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in if err != nil { return err } + if d.HasChange("subordinate_config") { + if d.Get("type").(string) != "SUBORDINATE" { + return fmt.Errorf("`subordinate_config` can only be configured on subordinate CA") + } + + // Activate subordinate CA in `AWAITING_USER_ACTIVATION` state. + if d.Get("state") == "AWAITING_USER_ACTIVATION" { + if _, ok := d.GetOk("pem_ca_certificate"); ok { + // Third party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with third party issuer") + if err := activateSubCAWithThirdPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with third party issuer: %v", err) + } + } else { + // First party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with first party issuer") + if err := activateSubCAWithFirstPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with first party issuer: %v", err) + } + } + log.Printf("[DEBUG] CertificateAuthority activated") + } + } + + log.Printf("[DEBUG] checking desired_state") if d.HasChange("desired_state") { // Currently, most CA state update operations are not idempotent. // Try to change state only if the current `state` does not match the `desired_state`. 
if p, ok := d.GetOk("desired_state"); ok && p.(string) != d.Get("state").(string) { switch p.(string) { case "ENABLED": - enableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Enabling CA: %#v", obj) - - res, err := sendRequest(config, "POST", billingProject, enableUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("Error enabling CA: %s", err) - } - - var opRes map[string]interface{} - err = privatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Enabling CA", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to enable CA: %s", err) + if err := enableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %v", err) } case "DISABLED": - disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Disabling CA: %#v", obj) - - dRes, err := sendRequest(config, "POST", billingProject, disableUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("Error disabling CA: %s", err) - } - - var opRes map[string]interface{} - err = privatecaOperationWaitTimeWithResponse( - config, dRes, &opRes, project, "Disabling CA", userAgent, - d.Timeout(schema.TimeoutDelete)) - if err != nil { - return fmt.Errorf("Error waiting to disable CA: %s", err) + if err := disableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error disabling CertificateAuthority: %v", err) } default: return fmt.Errorf("Unsupported value in field `desired_state`") @@ -1266,6 +1323,42 @@ func flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d *sche 
return v } +func flattenPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["certificate_authority"] = + flattenPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(original["certificateAuthority"], d, config) + transformed["pem_issuer_chain"] = + flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(original["pemIssuerChain"], d, config) + return []interface{}{transformed} +} +func flattenPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pem_certificates"] = + flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(original["pemCertificates"], d, config) + return []interface{}{transformed} +} +func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenPrivatecaCertificateAuthorityState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1611,6 +1704,59 @@ func expandPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d Terraf return v, nil } +func expandPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertificateAuthority, err := expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(original["certificate_authority"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateAuthority); val.IsValid() && !isEmptyValue(val) { + transformed["certificateAuthority"] = transformedCertificateAuthority + } + + transformedPemIssuerChain, err := expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(original["pem_issuer_chain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemIssuerChain); val.IsValid() && !isEmptyValue(val) { + transformed["pemIssuerChain"] = transformedPemIssuerChain + } + + return transformed, nil +} + +func expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPemCertificates, err := expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(original["pem_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemCertificates); val.IsValid() && !isEmptyValue(val) { + transformed["pemCertificates"] = transformedPemCertificates + } + + return transformed, nil +} + +func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandPrivatecaCertificateAuthorityGcsBucket(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_generated_test.go index 8c82b2532e..d3cf95f2b2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_generated_test.go @@ -45,7 +45,7 @@ func TestAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityBasicExam ResourceName: "google_privateca_certificate_authority.default", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"ignore_active_certificates_on_deletion", "location", "certificate_authority_id", "pool", "deletion_protection"}, + ImportStateVerifyIgnore: []string{"pem_ca_certificate", "ignore_active_certificates_on_deletion", "location", "certificate_authority_id", "pool", "deletion_protection"}, }, }, }) @@ -126,7 +126,7 @@ func TestAccPrivatecaCertificateAuthority_privatecaCertificateAuthoritySubordina ResourceName: "google_privateca_certificate_authority.default", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"ignore_active_certificates_on_deletion", "location", "certificate_authority_id", "pool", "deletion_protection"}, + ImportStateVerifyIgnore: []string{"pem_ca_certificate", "ignore_active_certificates_on_deletion", "location", "certificate_authority_id", "pool", "deletion_protection"}, }, }, }) @@ -134,13 +134,54 @@ func TestAccPrivatecaCertificateAuthority_privatecaCertificateAuthoritySubordina func testAccPrivatecaCertificateAuthority_privatecaCertificateAuthoritySubordinateExample(context map[string]interface{}) 
string { return Nprintf(` +resource "google_privateca_certificate_authority" "root-ca" { + pool = "%{pool_name}" + certificate_authority_id = "tf-test-my-certificate-authority%{random_suffix}-root" + location = "us-central1" + deletion_protection = false + ignore_active_certificates_on_deletion = true + config { + subject_config { + subject { + organization = "HashiCorp" + common_name = "my-certificate-authority" + } + subject_alt_name { + dns_names = ["hashicorp.com"] + } + } + x509_config { + ca_options { + # is_ca *MUST* be true for certificate authorities + is_ca = true + } + key_usage { + base_key_usage { + # cert_sign and crl_sign *MUST* be true for certificate authorities + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = false + } + } + } + } + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } +} + resource "google_privateca_certificate_authority" "default" { // This example assumes this pool already exists. // Pools cannot be deleted in normal test circumstances, so we depend on static pools pool = "%{pool_name}" - certificate_authority_id = "tf-test-my-certificate-authority%{random_suffix}" + certificate_authority_id = "tf-test-my-certificate-authority%{random_suffix}-sub" location = "%{pool_location}" deletion_protection = "%{deletion_protection}" + subordinate_config { + certificate_authority = google_privateca_certificate_authority.root-ca.name + } config { subject_config { subject { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_test.go index 18cdc7f421..24c5f8fbdb 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_test.go +++ 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccPrivatecaCertificateAuthority_rootCaIsEnabledByDefault(t *testing.T) { +func TestAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityUpdate(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -16,7 +16,6 @@ func TestAccPrivatecaCertificateAuthority_rootCaIsEnabledByDefault(t *testing.T) "random_suffix": randString(t, 10), } - resourceName := "google_privateca_certificate_authority.default" vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -24,85 +23,12 @@ func TestAccPrivatecaCertificateAuthority_rootCaIsEnabledByDefault(t *testing.T) Steps: []resource.TestStep{ { Config: testAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityBasicRoot(context), + // we added a `desired_state` field in https://github.com/GoogleCloudPlatform/magic-modules/pull/5934, this ensures + // we don't regress and that CAs are enabled by default Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "state", "ENABLED"), + resource.TestCheckResourceAttr("google_privateca_certificate_authority.default", "state", "ENABLED"), ), }, - }, - }) -} - -func TestAccPrivatecaCertificateAuthority_rootCaCreatedInStaged(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "pool_name": BootstrapSharedCaPoolInLocation(t, "us-central1"), - "pool_location": "us-central1", - "deletion_protection": false, - "random_suffix": randString(t, 10), - "desired_state": "STAGED", - } - - resourceName := "google_privateca_certificate_authority.default" - vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPrivatecaCertificateAuthorityDestroyProducer(t), - Steps: []resource.TestStep{ 
- { - Config: testAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityWithDesiredState(context), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "state", "STAGED"), - ), - }, - }, - }) -} - -func TestAccPrivatecaCertificateAuthority_subordinateCaCreatedInAwaitingUserActivation(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "pool_name": BootstrapSharedCaPoolInLocation(t, "us-central1"), - "pool_location": "us-central1", - "deletion_protection": false, - "random_suffix": randString(t, 10), - } - - resourceName := "google_privateca_certificate_authority.default" - vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPrivatecaCertificateAuthorityDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityBasicSubordinate(context), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "state", "AWAITING_USER_ACTIVATION"), - ), - }, - }, - }) -} - -func TestAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityUpdate(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "pool_name": BootstrapSharedCaPoolInLocation(t, "us-central1"), - "pool_location": "us-central1", - "deletion_protection": false, - "random_suffix": randString(t, 10), - } - - vcrTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPrivatecaCertificateAuthorityDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityBasicRoot(context), - }, { ResourceName: "google_privateca_certificate_authority.default", ImportState: true, @@ -355,57 +281,3 @@ resource "google_privateca_certificate_authority" "default" { } `, context) } - -func 
testAccPrivatecaCertificateAuthority_privatecaCertificateAuthorityBasicSubordinate(context map[string]interface{}) string { - return Nprintf(` -resource "google_privateca_certificate_authority" "default" { - // This example assumes this pool already exists. - // Pools cannot be deleted in normal test circumstances, so we depend on static pools - pool = "%{pool_name}" - certificate_authority_id = "tf-test-my-certificate-authority-%{random_suffix}" - location = "%{pool_location}" - deletion_protection = false - config { - subject_config { - subject { - organization = "HashiCorp" - common_name = "my-certificate-authority" - } - subject_alt_name { - dns_names = ["hashicorp.com"] - } - } - x509_config { - ca_options { - is_ca = true - max_issuer_path_length = 10 - } - key_usage { - base_key_usage { - digital_signature = true - content_commitment = true - key_encipherment = false - data_encipherment = true - key_agreement = true - cert_sign = true - crl_sign = true - decipher_only = true - } - extended_key_usage { - server_auth = true - client_auth = false - email_protection = true - code_signing = true - time_stamping = true - } - } - } - } - lifetime = "86400s" - key_spec { - algorithm = "RSA_PKCS1_4096_SHA256" - } - type = "SUBORDINATE" -} -`, context) -} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go index e745d202f2..d25f08062d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go @@ -484,12 +484,12 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, 
"projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -506,7 +506,7 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int } else { client.Config.BasePath = bp } - res, err := client.ApplyCertificateTemplate(context.Background(), obj, createDirective...) + res, err := client.ApplyCertificateTemplate(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -1227,7 +1227,6 @@ func flattenPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArra } return items } - func expandPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(o interface{}) []privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum { objs := o.([]interface{}) items := make([]privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum, 0, len(objs)) diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go index 23e46b1438..6645fa096e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go @@ -34,6 +34,7 @@ func resourceProjectServiceIdentity() *schema.Resource { }, "email": { Type: schema.TypeString, + Optional: true, Computed: true, }, }, @@ -86,16 +87,16 @@ func resourceProjectServiceIdentityCreate(d *schema.ResourceData, meta interface } d.SetId(id) - emailVal, ok := 
opRes["email"] - if !ok { - return fmt.Errorf("response %v missing 'email'", opRes) - } - email, ok := emailVal.(string) - if !ok { - return fmt.Errorf("unexpected type for email: got %T, want string", email) - } - if err := d.Set("email", email); err != nil { - return fmt.Errorf("Error setting email: %s", err) + // This API may not return the service identity's details, even if the relevant + // Google API is configured for service identities. + if emailVal, ok := opRes["email"]; ok { + email, ok := emailVal.(string) + if !ok { + return fmt.Errorf("unexpected type for email: got %T, want string", email) + } + if err := d.Set("email", email); err != nil { + return fmt.Errorf("Error setting email: %s", err) + } } return nil } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go index 9e096c4f09..af594eda8e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go @@ -91,6 +91,41 @@ for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message.`, }, + "bigquery_config": { + Type: schema.TypeList, + Optional: true, + Description: `If delivery to BigQuery is used with this subscription, this field is used to configure it. +Either pushConfig or bigQueryConfig can be set, but not both. 
+If both are empty, then the subscriber will pull and ack messages using API methods.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId}`, + }, + "drop_unknown_fields": { + Type: schema.TypeBool, + Optional: true, + Description: `When true and useTopicSchema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. +Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog.`, + }, + "use_topic_schema": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, use the topic's schema as the columns to write to in BigQuery, if it exists.`, + }, + "write_metadata": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. 
+The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column.`, + }, + }, + }, + ConflictsWith: []string{"push_config"}, + }, "dead_letter_policy": { Type: schema.TypeList, Optional: true, @@ -293,6 +328,7 @@ Note: if not specified, the Push endpoint URL will be used.`, }, }, }, + ConflictsWith: []string{"bigquery_config"}, }, "retain_acked_messages": { Type: schema.TypeBool, @@ -368,6 +404,12 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } + bigqueryConfigProp, err := expandPubsubSubscriptionBigqueryConfig(d.Get("bigquery_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bigquery_config"); !isEmptyValue(reflect.ValueOf(bigqueryConfigProp)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { + obj["bigqueryConfig"] = bigqueryConfigProp + } pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) if err != nil { return err @@ -553,6 +595,9 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("labels", flattenPubsubSubscriptionLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Subscription: %s", err) } + if err := d.Set("bigquery_config", flattenPubsubSubscriptionBigqueryConfig(res["bigqueryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Subscription: %s", err) + } if err := d.Set("push_config", flattenPubsubSubscriptionPushConfig(res["pushConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Subscription: %s", err) } @@ -609,6 +654,12 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } + bigqueryConfigProp, err := expandPubsubSubscriptionBigqueryConfig(d.Get("bigquery_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bigquery_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { + obj["bigqueryConfig"] = bigqueryConfigProp + } pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) if err != nil { return err @@ -669,6 +720,10 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) updateMask = append(updateMask, "labels") } + if d.HasChange("bigquery_config") { + updateMask = append(updateMask, "bigqueryConfig") + } + if d.HasChange("push_config") { updateMask = append(updateMask, "pushConfig") } @@ -794,6 +849,41 @@ func flattenPubsubSubscriptionLabels(v interface{}, d *schema.ResourceData, conf return v } +func flattenPubsubSubscriptionBigqueryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["table"] = + flattenPubsubSubscriptionBigqueryConfigTable(original["table"], d, config) + transformed["use_topic_schema"] = + flattenPubsubSubscriptionBigqueryConfigUseTopicSchema(original["useTopicSchema"], d, config) + transformed["write_metadata"] = + flattenPubsubSubscriptionBigqueryConfigWriteMetadata(original["writeMetadata"], d, config) + transformed["drop_unknown_fields"] = + flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(original["dropUnknownFields"], d, config) + return []interface{}{transformed} +} +func flattenPubsubSubscriptionBigqueryConfigTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenPubsubSubscriptionPushConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -989,6 +1079,62 @@ func expandPubsubSubscriptionLabels(v interface{}, d TerraformResourceData, conf return m, nil } +func expandPubsubSubscriptionBigqueryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandPubsubSubscriptionBigqueryConfigTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedUseTopicSchema, err := expandPubsubSubscriptionBigqueryConfigUseTopicSchema(original["use_topic_schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseTopicSchema); val.IsValid() && !isEmptyValue(val) { + transformed["useTopicSchema"] = transformedUseTopicSchema + } + + transformedWriteMetadata, err := expandPubsubSubscriptionBigqueryConfigWriteMetadata(original["write_metadata"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWriteMetadata); val.IsValid() && !isEmptyValue(val) { + transformed["writeMetadata"] = transformedWriteMetadata + } + + transformedDropUnknownFields, err := 
expandPubsubSubscriptionBigqueryConfigDropUnknownFields(original["drop_unknown_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDropUnknownFields); val.IsValid() && !isEmptyValue(val) { + transformed["dropUnknownFields"] = transformedDropUnknownFields + } + + return transformed, nil +} + +func expandPubsubSubscriptionBigqueryConfigTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandPubsubSubscriptionPushConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription_generated_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription_generated_test.go index 7d56755ab2..527c817507 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription_generated_test.go @@ -179,6 +179,86 @@ resource "google_pubsub_subscription" "example" { `, context) } +func TestAccPubsubSubscription_pubsubSubscriptionPushBqExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + 
PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPubsubSubscription_pubsubSubscriptionPushBqExample(context), + }, + { + ResourceName: "google_pubsub_subscription.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"topic"}, + }, + }, + }) +} + +func testAccPubsubSubscription_pubsubSubscriptionPushBqExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_pubsub_topic" "example" { + name = "tf-test-example-topic%{random_suffix}" +} + +resource "google_pubsub_subscription" "example" { + name = "tf-test-example-subscription%{random_suffix}" + topic = google_pubsub_topic.example.name + + bigquery_config { + table = "${google_bigquery_table.test.project}.${google_bigquery_table.test.dataset_id}.${google_bigquery_table.test.table_id}" + } + + depends_on = [google_project_iam_member.viewer, google_project_iam_member.editor] +} + +data "google_project" "project" { +} + +resource "google_project_iam_member" "viewer" { + project = data.google_project.project.project_id + role = "roles/bigquery.metadataViewer" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "editor" { + project = data.google_project.project.project_id + role = "roles/bigquery.dataEditor" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "tf_test_example_dataset%{random_suffix}" +} + +resource "google_bigquery_table" "test" { + deletion_protection = false + table_id = "tf_test_example_table%{random_suffix}" + dataset_id = google_bigquery_dataset.test.dataset_id + + schema = < Make sure that 'limit' is in a format that doesn't start with '1/' or contain curly braces. +E.g. 
use '/project/user' instead of '1/{project}/{user}'.`, }, "metric": { Type: schema.TypeString, diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go index 71e2a1a160..08c3415e9e 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go @@ -19,6 +19,8 @@ import ( "fmt" "log" "reflect" + "regexp" + "strconv" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -56,6 +58,44 @@ func resourceSpannerDBDdlCustomDiff(_ context.Context, diff *schema.ResourceDiff return resourceSpannerDBDdlCustomDiffFunc(diff) } +func validateDatabaseRetentionPeriod(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + valueError := fmt.Errorf("version_retention_period should be in range [1h, 7d], in a format resembling 1d, 24h, 1440m, or 86400s") + + r := regexp.MustCompile("^(\\d{1}d|\\d{1,3}h|\\d{2,5}m|\\d{4,6}s)$") + if !r.MatchString(value) { + errors = append(errors, valueError) + return + } + + unit := value[len(value)-1:] + multiple := value[:len(value)-1] + num, err := strconv.Atoi(multiple) + if err != nil { + errors = append(errors, valueError) + return + } + + if unit == "d" && (num < 1 || num > 7) { + errors = append(errors, valueError) + return + } + if unit == "h" && (num < 1 || num > 7*24) { + errors = append(errors, valueError) + return + } + if unit == "m" && (num < 1*60 || num > 7*24*60) { + errors = append(errors, valueError) + return + } + if unit == "s" && (num < 1*60*60 || num > 7*24*60*60) { + errors = append(errors, valueError) + return + } + + return +} + func resourceSpannerDatabase() *schema.Resource { return &schema.Resource{ Create: resourceSpannerDatabaseCreate, @@ -98,10 +138,7 @@ 
the instance is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].`, ForceNew: true, ValidateFunc: validateEnum([]string{"GOOGLE_STANDARD_SQL", "POSTGRESQL", ""}), Description: `The dialect of the Cloud Spanner Database. -If it is not provided, "GOOGLE_STANDARD_SQL" will be used. -Note: Databases that are created with POSTGRESQL dialect do not support -extra DDL statements in the 'CreateDatabase' call. You must therefore re-apply -terraform with ddl on the same database after creation. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]`, +If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]`, }, "ddl": { Type: schema.TypeList, @@ -132,6 +169,17 @@ in the same location as the Spanner Database.`, }, }, }, + "version_retention_period": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateDatabaseRetentionPeriod, + Description: `The retention period for the database. The retention period must be between 1 hour +and 7 days, and can be specified in days, hours, minutes, or seconds. For example, +the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. 
+If this property is used, you must avoid adding new DDL statements to 'ddl' that +update the database's version_retention_period.`, + }, "state": { Type: schema.TypeString, Computed: true, @@ -167,6 +215,12 @@ func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } + versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_retention_period"); !isEmptyValue(reflect.ValueOf(versionRetentionPeriodProp)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { + obj["versionRetentionPeriod"] = versionRetentionPeriodProp + } extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) if err != nil { return err @@ -259,6 +313,69 @@ func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) err } d.SetId(id) + // Note: Databases that are created with POSTGRESQL dialect do not support extra DDL + // statements at the time of database creation. To avoid users needing to run + // `terraform apply` twice to get their desired outcome, the provider does not set + // `extraStatements` in the call to the `create` endpoint and all DDL (other than + // ) is run post-create, by calling the `updateDdl` endpoint + + _, ok := opRes["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + + retention, retentionPeriodOk := d.GetOk("version_retention_period") + retentionPeriod := retention.(string) + ddl, ddlOk := d.GetOk("ddl") + ddlStatements := ddl.([]interface{}) + + if retentionPeriodOk || ddlOk { + + obj := make(map[string]interface{}) + updateDdls := []string{} + + if ddlOk { + for i := 0; i < len(ddlStatements); i++ { + updateDdls = append(updateDdls, ddlStatements[i].(string)) + } + } + + if retentionPeriodOk { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, retentionPeriod) + if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, retentionPeriod) + } + updateDdls = append(updateDdls, retentionDdl) + } + + log.Printf("[DEBUG] Applying extra DDL statements to the new Database: %#v", updateDdls) + + obj["statements"] = updateDdls + + url, err = replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") + if err != nil { + return err + } + + res, err = sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error executing DDL statements on Database: %s", err) + } + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = spannerOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Database", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to run DDL against newly-created Database: %s", err) + } + } + log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) return resourceSpannerDatabaseRead(d, meta) @@ -319,6 +436,9 @@ func 
resourceSpannerDatabaseRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("name", flattenSpannerDatabaseName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading Database: %s", err) } + if err := d.Set("version_retention_period", flattenSpannerDatabaseVersionRetentionPeriod(res["versionRetentionPeriod"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } if err := d.Set("state", flattenSpannerDatabaseState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading Database: %s", err) } @@ -352,9 +472,15 @@ func resourceSpannerDatabaseUpdate(d *schema.ResourceData, meta interface{}) err d.Partial(true) - if d.HasChange("ddl") { + if d.HasChange("version_retention_period") || d.HasChange("ddl") { obj := make(map[string]interface{}) + versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_retention_period"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { + obj["versionRetentionPeriod"] = versionRetentionPeriodProp + } extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) if err != nil { return err @@ -478,6 +604,10 @@ func flattenSpannerDatabaseName(v interface{}, d *schema.ResourceData, config *C return NameFromSelfLinkStateFunc(v) } +func flattenSpannerDatabaseVersionRetentionPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenSpannerDatabaseState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -514,6 +644,10 @@ func expandSpannerDatabaseName(v interface{}, d TerraformResourceData, config *C return v, nil } +func expandSpannerDatabaseVersionRetentionPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func 
expandSpannerDatabaseDdl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -558,8 +692,17 @@ func resourceSpannerDatabaseEncoder(d *schema.ResourceData, meta interface{}, ob if dialect, ok := obj["databaseDialect"]; ok && dialect == "POSTGRESQL" { obj["createStatement"] = fmt.Sprintf("CREATE DATABASE \"%s\"", obj["name"]) } + + // Extra DDL statements are removed from the create request and instead applied to the database in + // a post-create action, to accommodate retrictions when creating PostgreSQL-enabled databases. + // https://cloud.google.com/spanner/docs/create-manage-databases#create_a_database + log.Printf("[DEBUG] Preparing to create new Database. Any extra DDL statements will be applied to the Database in a separate API call") + delete(obj, "name") delete(obj, "instance") + + delete(obj, "versionRetentionPeriod") + delete(obj, "extraStatements") return obj, nil } @@ -574,8 +717,19 @@ func resourceSpannerDatabaseUpdateEncoder(d *schema.ResourceData, meta interface updateDdls = append(updateDdls, newDdls[i].(string)) } + //Add statement to update version_retention_period property, if needed + if d.HasChange("version_retention_period") { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, obj["versionRetentionPeriod"]) + if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, obj["versionRetentionPeriod"]) + } + updateDdls = append(updateDdls, retentionDdl) + } + obj["statements"] = updateDdls delete(obj, "name") + delete(obj, "versionRetentionPeriod") delete(obj, "instance") delete(obj, "extraStatements") return obj, nil diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_generated_test.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_generated_test.go index 7e02081b56..3128bf40f7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_generated_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_generated_test.go @@ -60,6 +60,7 @@ resource "google_spanner_instance" "main" { resource "google_spanner_database" "database" { instance = google_spanner_instance.main.name name = "tf-test-my-database%{random_suffix}" + version_retention_period = "3d" ddl = [ "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_test.go index 3fde3dd5f8..613ef0be05 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database_test.go @@ -25,6 +25,7 @@ func TestAccSpannerDatabase_basic(t *testing.T) { Config: testAccSpannerDatabase_basic(instanceName, databaseName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "1h"), // default set by API ), }, { @@ -38,6 +39,7 @@ func TestAccSpannerDatabase_basic(t *testing.T) { Config: testAccSpannerDatabase_basicUpdate(instanceName, databaseName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", 
"2d"), ), }, { @@ -105,6 +107,7 @@ resource "google_spanner_instance" "basic" { resource "google_spanner_database" "basic" { instance = google_spanner_instance.basic.name name = "%s" + version_retention_period = "2d" # increase from default 1h ddl = [ "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", @@ -171,6 +174,11 @@ resource "google_spanner_database" "basic_spangres" { instance = google_spanner_instance.basic.name name = "%s-spangres" database_dialect = "POSTGRESQL" + // Confirm that DDL can be run at creation time for POSTGRESQL + version_retention_period = "2h" + ddl = [ + "CREATE TABLE t1 (t1 bigint NOT NULL PRIMARY KEY)", + ] deletion_protection = false } `, instanceName, instanceName, databaseName) @@ -189,8 +197,8 @@ resource "google_spanner_database" "basic_spangres" { instance = google_spanner_instance.basic.name name = "%s-spangres" database_dialect = "POSTGRESQL" + version_retention_period = "4d" ddl = [ - "CREATE TABLE t1 (t1 bigint NOT NULL PRIMARY KEY)", "CREATE TABLE t2 (t2 bigint NOT NULL PRIMARY KEY)", "CREATE TABLE t3 (t3 bigint NOT NULL PRIMARY KEY)", "CREATE TABLE t4 (t4 bigint NOT NULL PRIMARY KEY)", @@ -200,6 +208,156 @@ resource "google_spanner_database" "basic_spangres" { `, instanceName, instanceName, databaseName) } +func TestAccSpannerDatabase_versionRetentionPeriod(t *testing.T) { + t.Parallel() + + rnd := randString(t, 10) + instanceName := fmt.Sprintf("tf-test-%s", rnd) + databaseName := fmt.Sprintf("tfgen_%s", rnd) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Test creating a database with `version_retention_period` set + Config: testAccSpannerDatabase_versionRetentionPeriod(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2h"), + ), + }, + { + // Test removing `version_retention_period` and setting retention period to a new value with a DDL statement in `ddl` + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate1(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "4h"), + ), + }, + { + // Test that adding `version_retention_period` controls retention time, regardless of any previous statements in `ddl` + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate2(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2h"), + ), + }, + { + // Test that changing the retention value via DDL when `version_retention_period` is set: + // - changes the value (from 2h to 8h) + // - is unstable; non-empty plan afterwards due to conflict + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate3(instanceName, databaseName), + ExpectNonEmptyPlan: true, // is unstable + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "8h"), + ), + }, + { + // Test that when the above config is reapplied: + // - changes the value (reverts to set value of `version_retention_period`, 2h) + // - is stable; no further conflict + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate3(instanceName, databaseName), //same as previous step + ExpectNonEmptyPlan: false, // is stable + Check: 
resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2h"), + ), + }, + }, + }) +} + +func testAccSpannerDatabase_versionRetentionPeriod(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2h" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName) +} + +func testAccSpannerDatabase_versionRetentionPeriodUpdate1(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + // Change 1/2 : deleted version_retention_period argument + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"4h\")", // Change 2/2 : set retention with new DDL + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName, databaseName) +} + +func testAccSpannerDatabase_versionRetentionPeriodUpdate2(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2h" // Change : added version_retention_period argument + ddl = [ + 
"CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"4h\")", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName, databaseName) +} + +func testAccSpannerDatabase_versionRetentionPeriodUpdate3(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2h" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"4h\")", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"8h\")", // Change : set retention with new DDL + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName, databaseName, databaseName) +} + // Unit Tests for type spannerDatabaseId func TestDatabaseNameForApi(t *testing.T) { id := spannerDatabaseId{ @@ -290,6 +448,77 @@ func TestSpannerDatabase_resourceSpannerDBDdlCustomDiffFuncForceNew(t *testing.T } } +// Unit Tests for validation of retention period argument +func TestValidateDatabaseRetentionPeriod(t *testing.T) { + t.Parallel() + testCases := map[string]struct { + input string + expectError bool + }{ + // Not valid input + "empty_string": { + input: "", + expectError: true, + }, + "number_with_no_unit": { + input: "1", + expectError: true, + }, + "less_than_1h": { + input: "59m", + expectError: true, + }, + "more_than_7days": { + input: "8d", + expectError: true, + }, + // Valid input + "1_hour_in_secs": { + input: "3600s", + expectError: false, + }, + "1_hour_in_mins": { + input: "60m", + expectError: false, + }, + "1_hour_in_hours": { + input: "1h", + expectError: false, + }, + "7_days_in_secs": { + input: fmt.Sprintf("%ds", 7*24*60*60), + expectError: false, + }, 
+ "7_days_in_mins": { + input: fmt.Sprintf("%dm", 7*24*60), + expectError: false, + }, + "7_days_in_hours": { + input: fmt.Sprintf("%dh", 7*24), + expectError: false, + }, + "7_days_in_days": { + input: "7d", + expectError: false, + }, + } + + for tn, tc := range testCases { + t.Run(tn, func(t *testing.T) { + _, errs := validateDatabaseRetentionPeriod(tc.input, "foobar") + var wantErrCount string + if tc.expectError { + wantErrCount = "1+" + } else { + wantErrCount = "0" + } + if (len(errs) > 0 && tc.expectError == false) || (len(errs) == 0 && tc.expectError == true) { + t.Errorf("failed, expected `%s` test case validation to have %s errors", tn, wantErrCount) + } + }) + } +} + func TestAccSpannerDatabase_deletionProtection(t *testing.T) { skipIfVcr(t) t.Parallel() diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go index f6f4b59ec0..9e43028793 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go @@ -157,6 +157,30 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + "sql_server_audit_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `The name of the destination bucket (e.g., gs://mybucket).`, + }, + "retention_interval": { + Type: schema.TypeString, + Optional: true, + Description: `How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + "upload_interval": { + Type: schema.TypeString, + Optional: true, + Description: `How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + }, + }, + }, "availability_type": { Type: schema.TypeString, Optional: true, @@ -243,6 +267,7 @@ is set to true.`, "collation": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The name of server instance collation.`, }, "database_flags": { @@ -350,6 +375,11 @@ is set to true.`, AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, Description: `The preferred compute engine zone.`, }, + "secondary_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The preferred Compute Engine zone for the secondary/failover.`, + }, }, }, }, @@ -432,6 +462,48 @@ is set to true.`, + "password_validation_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_length": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Minimum number of characters allowed.`, + }, + "complexity": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"COMPLEXITY_DEFAULT", "COMPLEXITY_UNSPECIFIED"}, false), + Description: `Password complexity.`, + }, + "reuse_interval": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Number of previous passwords that cannot be reused.`, + }, + "disallow_username_substring": { + Type: schema.TypeBool, + Optional: true, + Description: `Disallow username as a part of the password.`, + }, + "password_change_interval": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum interval after which
the password can be changed. This flag is only supported for PostgreSQL.`, + }, + "enable_password_policy": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the password policy is enabled or not.`, + }, + }, + }, + }, }, }, Description: `The settings to use for the database. The configuration is detailed below.`, @@ -462,7 +534,7 @@ is set to true.`, Optional: true, ForceNew: true, Sensitive: true, - Description: `Initial root password. Required for MS SQL Server, ignored by MySQL and PostgreSQL.`, + Description: `Initial root password. Required for MS SQL Server.`, }, "ip_address": { @@ -800,10 +872,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) instance.Settings = desiredSettings } - // MSSQL Server require rootPassword to be set - if strings.Contains(instance.DatabaseVersion, "SQLSERVER") { - instance.RootPassword = d.Get("root_password").(string) - } + instance.RootPassword = d.Get("root_password").(string) // Modifying a replica during Create can cause problems if the master is // modified at the same time.
Lock the master until we're done in order @@ -812,6 +881,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) mutexKV.Lock(instanceMutexKey(project, instance.MasterInstanceName)) defer mutexKV.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) } + if k, ok := d.GetOk("encryption_key_name"); ok { instance.DiskEncryptionConfiguration = &sqladmin.DiskEncryptionConfiguration{ KmsKeyName: k.(string), @@ -952,23 +1022,25 @@ func expandSqlDatabaseInstanceSettings(configured []interface{}) *sqladmin.Setti _settings := configured[0].(map[string]interface{}) settings := &sqladmin.Settings{ // Version is unset in Create but is set during update - SettingsVersion: int64(_settings["version"].(int)), - Tier: _settings["tier"].(string), - ForceSendFields: []string{"StorageAutoResize"}, - ActivationPolicy: _settings["activation_policy"].(string), - ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), - AvailabilityType: _settings["availability_type"].(string), - Collation: _settings["collation"].(string), - DataDiskSizeGb: int64(_settings["disk_size"].(int)), - DataDiskType: _settings["disk_type"].(string), - PricingPlan: _settings["pricing_plan"].(string), - UserLabels: convertStringMap(_settings["user_labels"].(map[string]interface{})), - BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), - DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].([]interface{})), - IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), - LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), - MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), - InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), + SettingsVersion: int64(_settings["version"].(int)), + Tier: _settings["tier"].(string), + 
ForceSendFields: []string{"StorageAutoResize"}, + ActivationPolicy: _settings["activation_policy"].(string), + ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), + SqlServerAuditConfig: expandSqlServerAuditConfig(_settings["sql_server_audit_config"].([]interface{})), + AvailabilityType: _settings["availability_type"].(string), + Collation: _settings["collation"].(string), + DataDiskSizeGb: int64(_settings["disk_size"].(int)), + DataDiskType: _settings["disk_type"].(string), + PricingPlan: _settings["pricing_plan"].(string), + UserLabels: convertStringMap(_settings["user_labels"].(map[string]interface{})), + BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), + DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].([]interface{})), + IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), + LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), + MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), + InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), + PasswordValidationPolicy: expandPasswordValidationPolicy(_settings["password_validation_policy"].([]interface{})), } resize := _settings["disk_autoresize"].(bool) @@ -1040,6 +1112,7 @@ func expandLocationPreference(configured []interface{}) *sqladmin.LocationPrefer return &sqladmin.LocationPreference{ FollowGaeApplication: _locationPreference["follow_gae_application"].(string), Zone: _locationPreference["zone"].(string), + SecondaryZone: _locationPreference["secondary_zone"].(string), } } @@ -1131,6 +1204,20 @@ func expandActiveDirectoryConfig(configured interface{}) *sqladmin.SqlActiveDire } } +func expandSqlServerAuditConfig(configured interface{}) *sqladmin.SqlServerAuditConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + + config := 
l[0].(map[string]interface{}) + return &sqladmin.SqlServerAuditConfig{ + Bucket: config["bucket"].(string), + RetentionInterval: config["retention_interval"].(string), + UploadInterval: config["upload_interval"].(string), + } +} + func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig { if len(configured) == 0 || configured[0] == nil { return nil @@ -1145,6 +1232,22 @@ func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig { } } +func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.PasswordValidationPolicy { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _passwordValidationPolicy := configured[0].(map[string]interface{}) + return &sqladmin.PasswordValidationPolicy{ + MinLength: int64(_passwordValidationPolicy["min_length"].(int)), + Complexity: _passwordValidationPolicy["complexity"].(string), + ReuseInterval: int64(_passwordValidationPolicy["reuse_interval"].(int)), + DisallowUsernameSubstring: _passwordValidationPolicy["disallow_username_substring"].(bool), + PasswordChangeInterval: _passwordValidationPolicy["password_change_interval"].(string), + EnablePasswordPolicy: _passwordValidationPolicy["enable_password_policy"].(bool), + } +} + func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) @@ -1185,6 +1288,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("settings", flattenSettings(instance.Settings)); err != nil { log.Printf("[WARN] Failed to set SQL Database Instance Settings") } + if instance.DiskEncryptionConfiguration != nil { if err := d.Set("encryption_key_name", instance.DiskEncryptionConfiguration.KmsKeyName); err != nil { return fmt.Errorf("Error setting encryption_key_name: %s", err) @@ -1259,6 +1363,9 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) Settings: 
expandSqlDatabaseInstanceSettings(d.Get("settings").([]interface{})), } + // Collation cannot be included in the update request + instance.Settings.Collation = "" + // Lock on the master_instance_name just in case updating any replica // settings causes operations on the master. if v, ok := d.GetOk("master_instance_name"); ok { @@ -1361,21 +1468,26 @@ func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) func flattenSettings(settings *sqladmin.Settings) []map[string]interface{} { data := map[string]interface{}{ - "version": settings.SettingsVersion, - "tier": settings.Tier, - "activation_policy": settings.ActivationPolicy, - "availability_type": settings.AvailabilityType, - "collation": settings.Collation, - "disk_type": settings.DataDiskType, - "disk_size": settings.DataDiskSizeGb, - "pricing_plan": settings.PricingPlan, - "user_labels": settings.UserLabels, + "version": settings.SettingsVersion, + "tier": settings.Tier, + "activation_policy": settings.ActivationPolicy, + "availability_type": settings.AvailabilityType, + "collation": settings.Collation, + "disk_type": settings.DataDiskType, + "disk_size": settings.DataDiskSizeGb, + "pricing_plan": settings.PricingPlan, + "user_labels": settings.UserLabels, + "password_validation_policy": settings.PasswordValidationPolicy, } if settings.ActiveDirectoryConfig != nil { data["active_directory_config"] = flattenActiveDirectoryConfig(settings.ActiveDirectoryConfig) } + if settings.SqlServerAuditConfig != nil { + data["sql_server_audit_config"] = flattenSqlServerAuditConfig(settings.SqlServerAuditConfig) + } + if settings.BackupConfiguration != nil { data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration) } @@ -1407,6 +1519,10 @@ func flattenSettings(settings *sqladmin.Settings) []map[string]interface{} { data["user_labels"] = settings.UserLabels } + if settings.PasswordValidationPolicy != nil { + data["password_validation_policy"] = 
flattenPasswordValidationPolicy(settings.PasswordValidationPolicy) + } + return []map[string]interface{}{data} } @@ -1447,6 +1563,19 @@ func flattenActiveDirectoryConfig(sqlActiveDirectoryConfig *sqladmin.SqlActiveDi } } +func flattenSqlServerAuditConfig(sqlServerAuditConfig *sqladmin.SqlServerAuditConfig) []map[string]interface{} { + if sqlServerAuditConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "bucket": sqlServerAuditConfig.Bucket, + "retention_interval": sqlServerAuditConfig.RetentionInterval, + "upload_interval": sqlServerAuditConfig.UploadInterval, + }, + } +} + func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]interface{} { flags := make([]map[string]interface{}, 0, len(databaseFlags)) @@ -1497,6 +1626,7 @@ func flattenLocationPreference(locationPreference *sqladmin.LocationPreference) data := map[string]interface{}{ "follow_gae_application": locationPreference.FollowGaeApplication, "zone": locationPreference.Zone, + "secondary_zone": locationPreference.SecondaryZone, } return []map[string]interface{}{data} @@ -1587,6 +1717,18 @@ func flattenInsightsConfig(insightsConfig *sqladmin.InsightsConfig) interface{} return []map[string]interface{}{data} } +func flattenPasswordValidationPolicy(passwordValidationPolicy *sqladmin.PasswordValidationPolicy) interface{} { + data := map[string]interface{}{ + "min_length": passwordValidationPolicy.MinLength, + "complexity": passwordValidationPolicy.Complexity, + "reuse_interval": passwordValidationPolicy.ReuseInterval, + "disallow_username_substring": passwordValidationPolicy.DisallowUsernameSubstring, + "password_change_interval": passwordValidationPolicy.PasswordChangeInterval, + "enable_password_policy": passwordValidationPolicy.EnablePasswordPolicy, + } + return []map[string]interface{}{data} +} + func instanceMutexKey(project, instance_name string) string { return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) } diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance_test.go index 20232e50b5..11f3f364ad 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance_test.go @@ -222,6 +222,16 @@ func TestAccSqlDatabaseInstance_basicMSSQL(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"root_password", "deletion_protection"}, }, + { + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_update_mssql, databaseName, rootPassword), + }, + { + ResourceName: "google_sql_database_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"root_password", "deletion_protection"}, + }, }, }) } @@ -299,6 +309,30 @@ func TestAccSqlDatabaseInstance_settings_basic(t *testing.T) { }) } +func TestAccSqlDatabaseInstance_settings_secondary(t *testing.T) { + t.Parallel() + + databaseName := "tf-test-" + randString(t, 10) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings_secondary, databaseName), + }, + { + ResourceName: "google_sql_database_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + func TestAccSqlDatabaseInstance_settings_deletionProtection(t *testing.T) { t.Parallel() @@ -1031,13 +1065,14 @@ func TestAccSqlDatabaseInstance_encryptionKey(t *testing.T) { t.Parallel() context := map[string]interface{}{ + "project_id": getTestProjectFromEnv(), "key_name": 
"tf-test-key-" + randString(t, 10), "instance_name": "tf-test-sql-" + randString(t, 10), } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), Steps: []resource.TestStep{ { @@ -1064,13 +1099,14 @@ func TestAccSqlDatabaseInstance_encryptionKey_replicaInDifferentRegion(t *testin t.Parallel() context := map[string]interface{}{ + "project_id": getTestProjectFromEnv(), "key_name": "tf-test-key-" + randString(t, 10), "instance_name": "tf-test-sql-" + randString(t, 10), } vcrTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProvidersOiCS, + Providers: testAccProviders, CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), Steps: []resource.TestStep{ { @@ -1119,6 +1155,93 @@ func TestAccSqlDatabaseInstance_ActiveDirectory(t *testing.T) { }) } +func TestAccSqlDatabaseInstance_SqlServerAuditConfig(t *testing.T) { + t.Parallel() + databaseName := "tf-test-" + randString(t, 10) + rootPassword := randString(t, 15) + addressName := "tf-test-" + randString(t, 10) + networkName := BootstrapSharedTestNetwork(t, "sql-instance-private-allocated-ip-range") + bucketName := fmt.Sprintf("%s-%d", "tf-test-bucket", randInt(t)) + uploadInterval := "900s" + retentionInterval := "86400s" + bucketNameUpdate := fmt.Sprintf("%s-%d", "tf-test-bucket", randInt(t)) + "update" + uploadIntervalUpdate := "1200s" + retentionIntervalUpdate := "172800s" + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSqlDatabaseInstance_SqlServerAuditConfig(networkName, addressName, databaseName, rootPassword, bucketName, uploadInterval, retentionInterval), + }, + { + ResourceName: "google_sql_database_instance.instance", + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"root_password", "deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_SqlServerAuditConfig(networkName, addressName, databaseName, rootPassword, bucketNameUpdate, uploadIntervalUpdate, retentionIntervalUpdate), + }, + { + ResourceName: "google_sql_database_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"root_password", "deletion_protection"}, + }, + }, + }) +} + +func TestAccSqlDatabaseInstance_sqlMysqlInstancePvpExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "deletion_protection": false, + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccSqlDatabaseInstance_sqlMysqlInstancePvpExample(context), + }, + { + ResourceName: "google_sql_database_instance.mysql_pvp_instance_name", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "root_password"}, + }, + }, + }) +} + +func testAccSqlDatabaseInstance_sqlMysqlInstancePvpExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_sql_database_instance" "mysql_pvp_instance_name" { + name = "tf-test-mysql-pvp-instance-name%{random_suffix}" + region = "asia-northeast1" + database_version = "MYSQL_8_0" + root_password = "abcABC123!" 
+ settings { + tier = "db-f1-micro" + password_validation_policy { + min_length = 6 + complexity = "COMPLEXITY_DEFAULT" + reuse_interval = 2 + disallow_username_substring = true + enable_password_policy = true + } + } + deletion_protection = "%{deletion_protection}" +} +`, context) +} + var testGoogleSqlDatabaseInstance_basic2 = ` resource "google_sql_database_instance" "instance" { region = "us-central1" @@ -1155,6 +1278,23 @@ resource "google_sql_database_instance" "instance" { } ` +var testGoogleSqlDatabaseInstance_update_mssql = ` +resource "google_sql_database_instance" "instance" { + name = "%s" + database_version = "SQLSERVER_2019_STANDARD" + root_password = "%s" + deletion_protection = false + settings { + tier = "db-custom-1-3840" + collation = "Polish_CI_AS" + ip_configuration { + ipv4_enabled = true + require_ssl = true + } + } +} +` + func testGoogleSqlDatabaseInstance_ActiveDirectoryConfig(databaseName, networkName, addressRangeName, rootPassword, adDomainName string) string { return fmt.Sprintf(` data "google_compute_network" "servicenet" { @@ -1196,6 +1336,55 @@ resource "google_sql_database_instance" "instance-with-ad" { }`, networkName, addressRangeName, databaseName, rootPassword, adDomainName) } +func testGoogleSqlDatabaseInstance_SqlServerAuditConfig(networkName, addressName, databaseName, rootPassword, bucketName, uploadInterval, retentionInterval string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "gs-bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +data "google_compute_network" "servicenet" { + name = "%s" +} + +resource "google_compute_global_address" "foobar" { + name = "%s" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = data.google_compute_network.servicenet.self_link +} + +resource "google_service_networking_connection" "foobar" { + network = data.google_compute_network.servicenet.self_link + service = "servicenetworking.googleapis.com" + 
reserved_peering_ranges = [google_compute_global_address.foobar.name] +} + +resource "google_sql_database_instance" "instance" { + depends_on = [google_service_networking_connection.foobar] + name = "%s" + region = "us-central1" + database_version = "SQLSERVER_2017_STANDARD" + root_password = "%s" + deletion_protection = false + settings { + tier = "db-custom-1-3840" + ip_configuration { + ipv4_enabled = "false" + private_network = data.google_compute_network.servicenet.self_link + } + sql_server_audit_config { + bucket = "gs://%s" + retention_interval = "%s" + upload_interval = "%s" + } + } +} +`, bucketName, networkName, addressName, databaseName, rootPassword, bucketName, retentionInterval, uploadInterval) +} + func testGoogleSqlDatabaseInstanceConfig_withoutReplica(instanceName string) string { return fmt.Sprintf(` resource "google_sql_database_instance" "instance" { @@ -1482,6 +1671,39 @@ resource "google_sql_database_instance" "instance" { } } ` + +var testGoogleSqlDatabaseInstance_settings_secondary = ` +resource "google_sql_database_instance" "instance" { + name = "%s" + region = "us-central1" + database_version = "MYSQL_5_7" + deletion_protection = false + settings { + tier = "db-f1-micro" + location_preference { + zone = "us-central1-f" + secondary_zone = "us-central1-a" + } + + ip_configuration { + ipv4_enabled = "true" + authorized_networks { + value = "108.12.12.12" + name = "misc" + expiration_time = "2037-11-15T16:19:00.094Z" + } + } + + backup_configuration { + enabled = "true" + start_time = "19:19" + } + + activation_policy = "ALWAYS" + } +} +` + var testGoogleSqlDatabaseInstance_settings_deletionProtection = ` resource "google_sql_database_instance" "instance" { name = "%s" @@ -1796,37 +2018,29 @@ resource "google_sql_database_instance" "instance" { } ` var testGoogleSqlDatabaseInstance_encryptionKey = ` -resource "google_project_service_identity" "gcp_sa_cloud_sql" { - provider = google-beta - service = "sqladmin.googleapis.com" +data 
"google_project" "project" { + project_id = "%{project_id}" } - resource "google_kms_key_ring" "keyring" { - provider = google-beta - name = "%{key_name}" location = "us-central1" } resource "google_kms_crypto_key" "key" { - provider = google-beta - name = "%{key_name}" key_ring = google_kms_key_ring.keyring.id } resource "google_kms_crypto_key_iam_binding" "crypto_key" { - provider = google-beta crypto_key_id = google_kms_crypto_key.key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" members = [ - "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}", + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-cloud-sql.iam.gserviceaccount.com", ] } resource "google_sql_database_instance" "master" { - provider = google-beta name = "%{instance_name}-master" database_version = "MYSQL_5_7" region = "us-central1" @@ -1845,7 +2059,6 @@ resource "google_sql_database_instance" "master" { } resource "google_sql_database_instance" "replica" { - provider = google-beta name = "%{instance_name}-replica" database_version = "MYSQL_5_7" region = "us-central1" @@ -1861,37 +2074,32 @@ resource "google_sql_database_instance" "replica" { ` var testGoogleSqlDatabaseInstance_encryptionKey_replicaInDifferentRegion = ` -resource "google_project_service_identity" "gcp_sa_cloud_sql" { - provider = google-beta - service = "sqladmin.googleapis.com" + +data "google_project" "project" { + project_id = "%{project_id}" } resource "google_kms_key_ring" "keyring" { - provider = google-beta - name = "%{key_name}" location = "us-central1" } resource "google_kms_crypto_key" "key" { - provider = google-beta name = "%{key_name}" key_ring = google_kms_key_ring.keyring.id } resource "google_kms_crypto_key_iam_binding" "crypto_key" { - provider = google-beta crypto_key_id = google_kms_crypto_key.key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" members = [ - "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}", + 
"serviceAccount:service-${data.google_project.project.number}@gcp-sa-cloud-sql.iam.gserviceaccount.com", ] } resource "google_sql_database_instance" "master" { - provider = google-beta name = "%{instance_name}-master" database_version = "MYSQL_5_7" region = "us-central1" @@ -1910,31 +2118,27 @@ resource "google_sql_database_instance" "master" { } resource "google_kms_key_ring" "keyring-rep" { - provider = google-beta name = "%{key_name}-rep" location = "us-east1" } resource "google_kms_crypto_key" "key-rep" { - provider = google-beta name = "%{key_name}-rep" key_ring = google_kms_key_ring.keyring-rep.id } resource "google_kms_crypto_key_iam_binding" "crypto_key_rep" { - provider = google-beta crypto_key_id = google_kms_crypto_key.key-rep.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" members = [ - "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}", + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-cloud-sql.iam.gserviceaccount.com", ] } resource "google_sql_database_instance" "replica" { - provider = google-beta name = "%{instance_name}-replica" database_version = "MYSQL_5_7" region = "us-east1" diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go index 471ec0f4f4..72a0428c4d 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go @@ -207,6 +207,18 @@ func resourceStorageBucket() *schema.Resource { Optional: true, Description: `Relevant only for versioned objects. 
The number of newer versions of an object to satisfy this condition.`, }, + "matches_prefix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name prefixes to satisfy this condition.`, + }, + "matches_suffix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name suffixes to satisfy this condition.`, + }, }, }, Description: `The Lifecycle Rule's condition configuration.`, @@ -389,6 +401,9 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Get the bucket and location bucket := d.Get("name").(string) + if err := checkGCSName(bucket); err != nil { + return err + } location := d.Get("location").(string) // Create a bucket, setting the labels, location and name. @@ -986,6 +1001,8 @@ func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleC "days_since_custom_time": int(condition.DaysSinceCustomTime), "days_since_noncurrent_time": int(condition.DaysSinceNoncurrentTime), "noncurrent_time_before": condition.NoncurrentTimeBefore, + "matches_prefix": convertStringArrToInterface(condition.MatchesPrefix), + "matches_suffix": convertStringArrToInterface(condition.MatchesSuffix), } if condition.IsLive == nil { ruleCondition["with_state"] = "ANY" @@ -1199,6 +1216,25 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi transformed.NoncurrentTimeBefore = v.(string) } + if v, ok := condition["matches_prefix"]; ok { + prefixes := v.([]interface{}) + transformedPrefixes := make([]string, 0, len(prefixes)) + + for _, v := range prefixes { + transformedPrefixes = append(transformedPrefixes, v.(string)) + } + transformed.MatchesPrefix = transformedPrefixes + } + if v, ok := condition["matches_suffix"]; ok { + suffixes := v.([]interface{}) + transformedSuffixes := make([]string, 0, len(suffixes)) + + for _, v := range suffixes { + 
transformedSuffixes = append(transformedSuffixes, v.(string)) + } + transformed.MatchesSuffix = transformedSuffixes + } + return transformed, nil } @@ -1264,6 +1300,19 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } + if v, ok := m["matches_prefix"]; ok { + matches_prefixes := v.([]interface{}) + for _, matches_prefix := range matches_prefixes { + buf.WriteString(fmt.Sprintf("%s-", matches_prefix)) + } + } + if v, ok := m["matches_suffix"]; ok { + matches_suffixes := v.([]interface{}) + for _, matches_suffix := range matches_suffixes { + buf.WriteString(fmt.Sprintf("%s-", matches_suffix)) + } + } + return hashcode(buf.String()) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket_test.go index 389b82b3a1..6b1ea627ac 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket_test.go @@ -1378,6 +1378,24 @@ resource "google_storage_bucket" "bucket" { with_state = "ARCHIVED" } } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_prefix = ["test"] + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_suffix = ["test"] + age = 2 + } + } } `, bucketName) } @@ -1443,6 +1461,24 @@ resource "google_storage_bucket" "bucket" { with_state = "ARCHIVED" } } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_prefix = ["test"] + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_suffix = ["test"] + age = 2 + } + } } `, bucketName) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go 
b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go index 6cbb9e7fe4..ae12c59ea7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go index 3e746dcf55..9934ff31aa 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go index b9e09955d3..079eae09be 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip new file mode 100644 index 0000000000..5c731f5f5a Binary files /dev/null and b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/cloudfunctions2/function-source-eventarc-gcs.zip differ diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/cloudfunctions2/function-source-pubsub.zip b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/cloudfunctions2/function-source-pubsub.zip new file mode 100644 index 0000000000..0d6860aa6e Binary files /dev/null and b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/cloudfunctions2/function-source-pubsub.zip differ diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/workflow.yaml b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/workflow.yaml new file mode 100644 index 0000000000..07d6c6ff1f --- /dev/null +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/test-fixtures/workflow.yaml @@ -0,0 +1,17 @@ +# This is a sample workflow that simply logs the incoming Pub/Sub event +# Note that $$ is needed for Terraform + +main: + params: [event] + steps: + - log_event: + call: sys.log + args: + text: $${event} + severity: INFO + - decode_pubsub_message: + assign: + - base64: $${base64.decode(event.data.data)} + - message: $${text.decode(base64)} + - return_pubsub_message: + return: $${message} diff --git 
a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go index 490b4eef64..7ffcb03ee9 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go index 942aebac29..f009a70667 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go @@ -1,4 +1,3 @@ -// package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go index 0ce18e799b..7717851281 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go @@ -6,6 +6,7 @@ import ( "fmt" "log" "os" + "regexp" "sort" "strconv" "strings" @@ -525,3 +526,28 @@ func fake404(reasonResourceType, resourceName string) *googleapi.Error { Message: fmt.Sprintf("%v object %v not found", reasonResourceType, resourceName), } } + +// validate name of the gcs bucket. 
Guidelines are located at https://cloud.google.com/storage/docs/naming-buckets +// this does not attempt to check for IP addresses or close misspellings of "google" +func checkGCSName(name string) error { + if strings.HasPrefix(name, "goog") { + return fmt.Errorf("error: bucket name %s cannot start with %q", name, "goog") + } + + if strings.Contains(name, "google") { + return fmt.Errorf("error: bucket name %s cannot contain %q", name, "google") + } + + valid, _ := regexp.MatchString("^[a-z0-9][a-z0-9_.-]{1,220}[a-z0-9]$", name) + if !valid { + return fmt.Errorf("error: bucket name validation failed %v. See https://cloud.google.com/storage/docs/naming-buckets", name) + } + + for _, str := range strings.Split(name, ".") { + valid, _ := regexp.MatchString("^[a-z0-9_-]{1,63}$", str) + if !valid { + return fmt.Errorf("error: bucket name validation failed %v", str) + } + } + return nil +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils_test.go index 5faffc2d68..1adfb95712 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils_test.go @@ -1,6 +1,7 @@ package google import ( + "fmt" "net/url" "reflect" "strings" @@ -607,3 +608,44 @@ func TestSnakeToPascalCase(t *testing.T) { t.Fatalf("(%s) did not match expected value: %s", actual, expected) } } + +func TestCheckGCSName(t *testing.T) { + valid63 := randString(t, 63) + cases := map[string]bool{ + // Valid + "foobar": true, + "foobar1": true, + "12345": true, + "foo_bar_baz": true, + "foo-bar-baz": true, + "foo-bar_baz1": true, + "foo--bar": true, + "foo__bar": true, + "foo-goog": true, + "foo.goog": true, + valid63: true, + fmt.Sprintf("%s.%s.%s", valid63, valid63, valid63): true, + + // Invalid + "goog-foobar": false, + "foobar-google": false, + 
"-foobar": false, + "foobar-": false, + "_foobar": false, + "foobar_": false, + "fo": false, + "foo$bar": false, + "foo..bar": false, + randString(t, 64): false, + fmt.Sprintf("%s.%s.%s.%s", valid63, valid63, valid63, valid63): false, + } + + for bucketName, valid := range cases { + err := checkGCSName(bucketName) + if valid && err != nil { + t.Errorf("The bucket name %s was expected to pass validation and did not pass.", bucketName) + } else if !valid && err == nil { + t.Errorf("The bucket name %s was NOT expected to pass validation and passed.", bucketName) + } + } +} diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go index ee198c181c..4b70a2e7d6 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go @@ -82,7 +82,9 @@ var rfc1918Networks = []string{ "192.168.0.0/16", } -func validateGCPName(v interface{}, k string) (ws []string, errors []error) { +// validateGCEName ensures that a field matches the requirements for Compute Engine resource names +// https://cloud.google.com/compute/docs/naming-resources#resource-name-format +func validateGCEName(v interface{}, k string) (ws []string, errors []error) { re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` return validateRegexp(re)(v, k) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation_test.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation_test.go index c46fd6cc00..46f9a03579 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation_test.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation_test.go @@ -10,7 +10,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -func TestValidateGCPName(t *testing.T) { +func TestvalidateGCEName(t *testing.T) { x := []StringValidationTestCase{ // No errors {TestName: "basic", Value: "foobar"}, @@ -27,7 +27,7 @@ func TestValidateGCPName(t *testing.T) { {TestName: "too long", Value: "foobarfoobarfoobarfoobarfoobarfoobarfoobarfoobarfoobarfoobarfoob", ExpectError: true}, } - es := testStringValidationCases(x, validateGCPName) + es := testStringValidationCases(x, validateGCEName) if len(es) > 0 { t.Errorf("Failed to validate GCP names: %v", es) } diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go index df0d1109cc..ffd34ed1f2 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go index 0c855d09f6..f46412abc7 100644 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go +++ b/third_party/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/third_party/github.com/hashicorp/terraform-provider-google-beta/scripts/sidebar/sidebar.go b/third_party/github.com/hashicorp/terraform-provider-google-beta/scripts/sidebar/sidebar.go deleted file mode 100644 index d51f059ab5..0000000000 --- a/third_party/github.com/hashicorp/terraform-provider-google-beta/scripts/sidebar/sidebar.go +++ /dev/null @@ -1,112 +0,0 @@ -//go:generate go run sidebar.go -package main - -import ( - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "runtime" - "strings" - "text/template" -) - -type Entry struct { - Filename string - Product string - Resource string -} - -type Entries struct { - Resources []Entry - DataSources []Entry -} - -func main() { - _, scriptPath, _, ok := runtime.Caller(0) - if !ok { - log.Fatal("Could not get current working directory") - } - tpgDir := scriptPath - for !strings.HasPrefix(filepath.Base(tpgDir), "terraform-provider-") && tpgDir != "/" { - tpgDir = filepath.Clean(tpgDir + "/..") - } - if tpgDir == "/" { - log.Fatal("Script was run outside of google provider directory") - } - - resourcesByProduct, err := entriesByProduct(tpgDir + "/website/docs/r") - if err != nil { - panic(err) - } - dataSourcesByProduct, err := entriesByProduct(tpgDir + "/website/docs/d") - if err != nil { - panic(err) - } - allEntriesByProduct := make(map[string]Entries) - for p, e := range resourcesByProduct { - v := allEntriesByProduct[p] - v.Resources = e - allEntriesByProduct[p] = v - } - for p, e := range dataSourcesByProduct { - v := allEntriesByProduct[p] - v.DataSources = e - allEntriesByProduct[p] = v - } - - tmpl, err := template.ParseFiles(tpgDir + "/website/google.erb.tmpl") - if err != nil { - panic(err) - } - f, err := os.Create(tpgDir + "/website/google.erb") - if err != nil { - panic(err) - } - defer f.Close() - err = tmpl.Execute(f, allEntriesByProduct) - if err != nil { - panic(err) - } 
-} - -func entriesByProduct(dir string) (map[string][]Entry, error) { - d, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - - entriesByProduct := make(map[string][]Entry) - for _, f := range d { - entry, err := getEntry(dir, f.Name()) - if err != nil { - return nil, err - } - entriesByProduct[entry.Product] = append(entriesByProduct[entry.Product], entry) - } - - return entriesByProduct, nil -} - -func getEntry(dir, filename string) (Entry, error) { - file, err := ioutil.ReadFile(dir + "/" + filename) - if err != nil { - return Entry{}, err - } - - return Entry{ - Filename: strings.TrimSuffix(filename, ".markdown"), - Product: findRegex(file, `subcategory: "(.*)"`), - Resource: findRegex(file, `page_title: "Google: (.*)"`), - }, nil -} - -func findRegex(contents []byte, regex string) string { - r := regexp.MustCompile(regex) - sm := r.FindStringSubmatch(string(contents)) - if len(sm) > 1 { - return sm[1] - } - return "" -} diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations/operations.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations/operations.go index ab74fca336..aff60d94dd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations/operations.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations/operations.go @@ -111,7 +111,7 @@ func (op *StandardGCPOperation) operate(ctx context.Context) (*dcl.RetryDetails, } if op.Error != nil { - return nil, fmt.Errorf("operation received error: %+v", op.Error) + return nil, fmt.Errorf("operation received error: %+v details: %v", op.Error, op.Response) } if len(op.response) == 0 && len(op.Response) != 0 { diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/project_id.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/project_id.go index ce698d9220..f240b4006b 
100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/project_id.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/project_id.go @@ -21,10 +21,10 @@ import ( ) // This matches either the entire string if it contains no forward slashes or just projects/{project_number}/ if it does. -var projectNumberRegex = regexp.MustCompile(`(^\d+$|projects/\d+)`) +var projectNumberRegex = regexp.MustCompile(`(^\d+$|projects/\d+|metricsScopes/\d+)`) // This matches either the entire string if it contains no forward slashes or just projects/{project_id}/ if it does. -var projectIDRegex = regexp.MustCompile(`(^[^/]+$|projects/[^/]+)`) +var projectIDRegex = regexp.MustCompile(`(^[^/]+$|projects/[^/]+|metricsScopes/[^/]+)`) // ProjectResponse is the response from Cloud Resource Manager. type ProjectResponse struct { @@ -50,6 +50,10 @@ func FlattenProjectNumbersToIDs(config *Config, fromServer *string) *string { if strings.HasPrefix(number, "projects/") { p.ProjectID = "projects/" + p.ProjectID } + if strings.HasPrefix(number, "metricsScopes/") { + p.ProjectID = "metricsScopes/" + p.ProjectID + } + return p.ProjectID }) return &editedServer @@ -76,6 +80,10 @@ func ExpandProjectIDsToNumbers(config *Config, fromConfig *string) (*string, err if strings.HasPrefix(id, "projects/") { p.ProjectNumber = "projects/" + p.ProjectNumber } + if strings.HasPrefix(id, "metricsScopes/") { + p.ProjectNumber = "metricsScopes/" + p.ProjectNumber + } + return p.ProjectNumber }) return &editedConfig, nil @@ -85,6 +93,7 @@ func ExpandProjectIDsToNumbers(config *Config, fromConfig *string) (*string, err func FetchProjectInfo(config *Config, projectIdentifier string) (ProjectResponse, error) { var p ProjectResponse trimmedIdentifier := strings.TrimPrefix(projectIdentifier, "projects/") + trimmedIdentifier = strings.TrimPrefix(trimmedIdentifier, "metricsScopes/") trimmedIdentifier = strings.TrimSuffix(trimmedIdentifier, "/") 
retryDetails, err := SendRequest(context.TODO(), config, "GET", "https://cloudresourcemanager.googleapis.com/v1/projects/"+trimmedIdentifier, nil, nil) if err != nil { diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment.go index b6bea619e6..0233b603db 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment.go @@ -129,9 +129,8 @@ func (c *Client) GetEnvironment(ctx context.Context, r *Environment) (*Environme if err != nil { return nil, err } - nr := r.urlNormalized() - result.ApigeeOrganization = nr.ApigeeOrganization - result.Name = nr.Name + result.ApigeeOrganization = r.ApigeeOrganization + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group.go index 81e3df3663..fc83608e07 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group.go @@ -173,9 +173,8 @@ func (c *Client) GetEnvironmentGroup(ctx context.Context, r *EnvironmentGroup) ( if err != nil { return nil, err } - nr := r.urlNormalized() - result.ApigeeOrganization = nr.ApigeeOrganization - result.Name = nr.Name + result.ApigeeOrganization = 
r.ApigeeOrganization + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment.go index 64fb051b89..5dfffa5e1f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment.go @@ -142,9 +142,8 @@ func (c *Client) GetEnvironmentGroupAttachment(ctx context.Context, r *Environme if err != nil { return nil, err } - nr := r.urlNormalized() - result.Envgroup = nr.Envgroup - result.Name = nr.Name + result.Envgroup = r.Envgroup + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment_internal.go index cfaa3115b2..2da386e3d4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/environment_group_attachment_internal.go @@ -250,11 +250,8 @@ func (op *createEnvironmentGroupAttachmentOperation) do(ctx context.Context, r * op.response, _ = 
o.FirstResponse() // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetEnvironmentGroupAttachment(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/instance.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/instance.go index 97be8a6677..46152fa905 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/instance.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/instance.go @@ -212,9 +212,8 @@ func (c *Client) GetInstance(ctx context.Context, r *Instance) (*Instance, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.ApigeeOrganization = nr.ApigeeOrganization - result.Name = nr.Name + result.ApigeeOrganization = r.ApigeeOrganization + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization.go index e32f6010a8..2f39762408 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization.go @@ -374,8 +374,7 @@ func (c *Client) GetOrganization(ctx context.Context, r *Organization) (*Organiz if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization_internal.go index bd98971192..b168858662 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta/organization_internal.go @@ -335,11 +335,8 @@ func (op *createOrganizationOperation) do(ctx context.Context, r *Organization, op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetOrganization(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload.go index d8cdecb3bd..754367d927 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload.go @@ -389,10 +389,9 @@ func (c *Client) GetWorkload(ctx context.Context, r *Workload) (*Workload, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Organization = nr.Organization - result.Location = nr.Location - result.Name = nr.Name + result.Organization = r.Organization + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload_internal.go index 974602dfb2..5b4eee801a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload_internal.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/beta/workload_internal.go @@ -368,11 +368,8 @@ func (op *createWorkloadOperation) do(ctx context.Context, r *Workload, c *Clien op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetWorkload(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment.go index 7726b32a0d..c451619d85 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment.go @@ -204,10 +204,9 @@ func (c *Client) GetAssignment(ctx context.Context, r *Assignment) (*Assignment, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Reservation = nr.Reservation + result.Project = r.Project + result.Location = r.Location + result.Reservation = r.Reservation c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment_internal.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment_internal.go index 09d9e2190c..41e4c27c66 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/assignment_internal.go @@ -247,11 +247,8 @@ func (op *createAssignmentOperation) do(ctx context.Context, r *Assignment, c *C op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetAssignment(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment.go index 364b653dd4..ed7fffcdef 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment.go @@ -353,10 +353,9 @@ func (c *Client) GetCapacityCommitment(ctx context.Context, r *CapacityCommitmen if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name 
c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment_internal.go index 7874e74125..cd3a829338 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/capacity_commitment_internal.go @@ -337,11 +337,8 @@ func (op *createCapacityCommitmentOperation) do(ctx context.Context, r *Capacity op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetCapacityCommitment(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/reservation.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/reservation.go index 4376e8e360..83985e41f6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/reservation.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation/beta/reservation.go @@ -151,10 +151,9 @@ func (c *Client) 
GetReservation(ctx context.Context, r *Reservation) (*Reservati if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/billingbudgets/beta/budget.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/billingbudgets/beta/budget.go index 71985f6b1e..731617ce1c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/billingbudgets/beta/budget.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/billingbudgets/beta/budget.go @@ -747,9 +747,8 @@ func (c *Client) GetBudget(ctx context.Context, r *Budget) (*Budget, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.BillingAccount = nr.BillingAccount - result.Name = nr.Name + result.BillingAccount = r.BillingAccount + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/attestor.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/attestor.go index ed34df008c..49fe578da0 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/attestor.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/attestor.go @@ -329,9 +329,8 @@ func (c *Client) GetAttestor(ctx context.Context, r *Attestor) (*Attestor, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/policy.go index a7ab4b067a..64859113a3 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/binaryauthorization/policy.go @@ -711,8 +711,7 @@ func (c *Client) GetPolicy(ctx context.Context, r *Policy) (*Policy, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project + result.Project = r.Project c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild/beta/worker_pool.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild/beta/worker_pool.go index 946767393e..90a285e5c7 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild/beta/worker_pool.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild/beta/worker_pool.go @@ -464,10 +464,9 @@ func (c *Client) GetWorkerPool(ctx context.Context, r *WorkerPool) (*WorkerPool, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/delivery_pipeline.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/delivery_pipeline.go index d63292dc29..0831058922 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/delivery_pipeline.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/delivery_pipeline.go @@ -409,10 +409,9 @@ func (c *Client) GetDeliveryPipeline(ctx context.Context, r *DeliveryPipeline) ( if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/target.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/target.go index c67e498c7a..6a1677eb66 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/target.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta/target.go @@ -345,10 +345,9 @@ func (c *Client) GetTarget(ctx context.Context, r *Target) (*Target, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudfunctions/beta/function.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudfunctions/beta/function.go index 8f3ca3bbf4..0e5411b8f1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudfunctions/beta/function.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudfunctions/beta/function.go @@ -443,10 +443,9 @@ func (c *Client) GetFunction(ctx context.Context, r *Function) (*Function, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Region = nr.Region - result.Name = nr.Name + result.Project = r.Project + result.Region = r.Region + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group.go index f1733042a3..0c49deb9cd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group.go @@ -643,8 +643,7 @@ func (c *Client) GetGroup(ctx context.Context, r *Group) (*Group, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group_internal.go index cd142f43c8..613faa19cc 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/group_internal.go @@ -402,11 +402,8 @@ func (op *createGroupOperation) do(ctx context.Context, r *Group, c *Client) err op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetGroup(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership.go index 8fda4dacf2..565a48870d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership.go @@ -577,9 +577,8 @@ func (c *Client) GetMembership(ctx context.Context, r *Membership) (*Membership, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Group = nr.Group - result.Name = nr.Name + result.Group = r.Group + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership_internal.go index 76ab1ec662..925cdd63f8 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudidentity/beta/membership_internal.go @@ -367,11 +367,8 @@ func (op *createMembershipOperation) 
do(ctx context.Context, r *Membership, c *C op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetMembership(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/crypto_key.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/crypto_key.go index 422454265d..76b023b71d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/crypto_key.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/crypto_key.go @@ -638,11 +638,10 @@ func (c *Client) GetCryptoKey(ctx context.Context, r *CryptoKey) (*CryptoKey, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.KeyRing = nr.KeyRing - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.KeyRing = r.KeyRing + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.go index dc8cc9e689..d7e6b308d9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.go @@ -275,10 +275,9 @@ func (c *Client) GetEkmConnection(ctx context.Context, r *EkmConnection) (*EkmCo if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.yaml index 2db9c60810..80ce2658bc 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection.yaml @@ -199,4 +199,4 @@ components: service pointing to an EKM replica, in the format `projects/*/locations/*/namespaces/*/services/*`. 
x-dcl-references: - resource: Servicedirectory/Service - field: selfLink + field: name diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_schema.go index 2d1b4402e0..5641113c23 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_schema.go @@ -230,7 +230,7 @@ func DCLEkmConnectionSchema() *dcl.Schema { ResourceReferences: []*dcl.PropertyResourceReference{ &dcl.PropertyResourceReference{ Resource: "Servicedirectory/Service", - Field: "selfLink", + Field: "name", }, }, }, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_yaml_embed.go index 79c0ed73d0..4c4c94b5dd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/ekm_connection_yaml_embed.go @@ -17,7 +17,7 @@ package cloudkms // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/cloudkms/ekm_connection.yaml -var YAML_ekm_connection = []byte("info:\n title: Cloudkms/EkmConnection\n description: The Cloudkms EkmConnection resource\n x-dcl-struct-name: EkmConnection\n x-dcl-has-iam: true\npaths:\n get:\n description: The function used to get information about a EkmConnection\n parameters:\n - name: EkmConnection\n required: true\n description: A full instance of a EkmConnection\n apply:\n description: The 
function used to apply information about a EkmConnection\n parameters:\n - name: EkmConnection\n required: true\n description: A full instance of a EkmConnection\n list:\n description: The function used to list information about many EkmConnection\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n EkmConnection:\n title: EkmConnection\n x-dcl-id: projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: true\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - serviceResolvers\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which the EkmConnection was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Optional. This checksum is computed by the server based on\n the value of other fields, and may be sent on update requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name for the EkmConnection in the format `projects/*/locations/*/ekmConnections/*`.\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n serviceResolvers:\n type: array\n x-dcl-go-name: ServiceResolvers\n description: A list of ServiceResolvers where the EKM can be reached. There\n should be one ServiceResolver per EKM replica. 
Currently, only a single\n ServiceResolver is supported.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: EkmConnectionServiceResolvers\n required:\n - serviceDirectoryService\n - hostname\n - serverCertificates\n properties:\n endpointFilter:\n type: string\n x-dcl-go-name: EndpointFilter\n description: Optional. The filter applied to the endpoints of the\n resolved service. If no filter is specified, all endpoints will\n be considered. An endpoint will be chosen arbitrarily from the filtered\n list for each request. For endpoint filter syntax and examples,\n see https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest.\n hostname:\n type: string\n x-dcl-go-name: Hostname\n description: Required. The hostname of the EKM replica used at TLS\n and HTTP layers.\n serverCertificates:\n type: array\n x-dcl-go-name: ServerCertificates\n description: Required. A list of leaf server certificates used to\n authenticate HTTPS connections to the EKM replica. Currently, a\n maximum of 10 Certificate is supported.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: EkmConnectionServiceResolversServerCertificates\n required:\n - rawDer\n properties:\n issuer:\n type: string\n x-dcl-go-name: Issuer\n readOnly: true\n description: Output only. The issuer distinguished name in RFC\n 2253 format. Only present if parsed is true.\n notAfterTime:\n type: string\n format: date-time\n x-dcl-go-name: NotAfterTime\n readOnly: true\n description: Output only. The certificate is not valid after\n this time. Only present if parsed is true.\n notBeforeTime:\n type: string\n format: date-time\n x-dcl-go-name: NotBeforeTime\n readOnly: true\n description: Output only. The certificate is not valid before\n this time. Only present if parsed is true.\n parsed:\n type: boolean\n x-dcl-go-name: Parsed\n readOnly: true\n description: Output only. 
True if the certificate was parsed\n successfully.\n rawDer:\n type: string\n x-dcl-go-name: RawDer\n description: Required. The raw certificate bytes in DER format.\n serialNumber:\n type: string\n x-dcl-go-name: SerialNumber\n readOnly: true\n description: Output only. The certificate serial number as a\n hex string. Only present if parsed is true.\n sha256Fingerprint:\n type: string\n x-dcl-go-name: Sha256Fingerprint\n readOnly: true\n description: Output only. The SHA-256 certificate fingerprint\n as a hex string. Only present if parsed is true.\n subject:\n type: string\n x-dcl-go-name: Subject\n readOnly: true\n description: Output only. The subject distinguished name in\n RFC 2253 format. Only present if parsed is true.\n subjectAlternativeDnsNames:\n type: array\n x-dcl-go-name: SubjectAlternativeDnsNames\n readOnly: true\n description: Output only. The subject Alternative DNS names.\n Only present if parsed is true.\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceDirectoryService:\n type: string\n x-dcl-go-name: ServiceDirectoryService\n description: Required. 
The resource name of the Service Directory\n service pointing to an EKM replica, in the format `projects/*/locations/*/namespaces/*/services/*`.\n x-dcl-references:\n - resource: Servicedirectory/Service\n field: selfLink\n") +var YAML_ekm_connection = []byte("info:\n title: Cloudkms/EkmConnection\n description: The Cloudkms EkmConnection resource\n x-dcl-struct-name: EkmConnection\n x-dcl-has-iam: true\npaths:\n get:\n description: The function used to get information about a EkmConnection\n parameters:\n - name: EkmConnection\n required: true\n description: A full instance of a EkmConnection\n apply:\n description: The function used to apply information about a EkmConnection\n parameters:\n - name: EkmConnection\n required: true\n description: A full instance of a EkmConnection\n list:\n description: The function used to list information about many EkmConnection\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n EkmConnection:\n title: EkmConnection\n x-dcl-id: projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: true\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - serviceResolvers\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which the EkmConnection was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Optional. 
This checksum is computed by the server based on\n the value of other fields, and may be sent on update requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name for the EkmConnection in the format `projects/*/locations/*/ekmConnections/*`.\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n serviceResolvers:\n type: array\n x-dcl-go-name: ServiceResolvers\n description: A list of ServiceResolvers where the EKM can be reached. There\n should be one ServiceResolver per EKM replica. Currently, only a single\n ServiceResolver is supported.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: EkmConnectionServiceResolvers\n required:\n - serviceDirectoryService\n - hostname\n - serverCertificates\n properties:\n endpointFilter:\n type: string\n x-dcl-go-name: EndpointFilter\n description: Optional. The filter applied to the endpoints of the\n resolved service. If no filter is specified, all endpoints will\n be considered. An endpoint will be chosen arbitrarily from the filtered\n list for each request. For endpoint filter syntax and examples,\n see https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest.\n hostname:\n type: string\n x-dcl-go-name: Hostname\n description: Required. The hostname of the EKM replica used at TLS\n and HTTP layers.\n serverCertificates:\n type: array\n x-dcl-go-name: ServerCertificates\n description: Required. A list of leaf server certificates used to\n authenticate HTTPS connections to the EKM replica. 
Currently, a\n maximum of 10 Certificate is supported.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: EkmConnectionServiceResolversServerCertificates\n required:\n - rawDer\n properties:\n issuer:\n type: string\n x-dcl-go-name: Issuer\n readOnly: true\n description: Output only. The issuer distinguished name in RFC\n 2253 format. Only present if parsed is true.\n notAfterTime:\n type: string\n format: date-time\n x-dcl-go-name: NotAfterTime\n readOnly: true\n description: Output only. The certificate is not valid after\n this time. Only present if parsed is true.\n notBeforeTime:\n type: string\n format: date-time\n x-dcl-go-name: NotBeforeTime\n readOnly: true\n description: Output only. The certificate is not valid before\n this time. Only present if parsed is true.\n parsed:\n type: boolean\n x-dcl-go-name: Parsed\n readOnly: true\n description: Output only. True if the certificate was parsed\n successfully.\n rawDer:\n type: string\n x-dcl-go-name: RawDer\n description: Required. The raw certificate bytes in DER format.\n serialNumber:\n type: string\n x-dcl-go-name: SerialNumber\n readOnly: true\n description: Output only. The certificate serial number as a\n hex string. Only present if parsed is true.\n sha256Fingerprint:\n type: string\n x-dcl-go-name: Sha256Fingerprint\n readOnly: true\n description: Output only. The SHA-256 certificate fingerprint\n as a hex string. Only present if parsed is true.\n subject:\n type: string\n x-dcl-go-name: Subject\n readOnly: true\n description: Output only. The subject distinguished name in\n RFC 2253 format. Only present if parsed is true.\n subjectAlternativeDnsNames:\n type: array\n x-dcl-go-name: SubjectAlternativeDnsNames\n readOnly: true\n description: Output only. 
The subject Alternative DNS names.\n Only present if parsed is true.\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceDirectoryService:\n type: string\n x-dcl-go-name: ServiceDirectoryService\n description: Required. The resource name of the Service Directory\n service pointing to an EKM replica, in the format `projects/*/locations/*/namespaces/*/services/*`.\n x-dcl-references:\n - resource: Servicedirectory/Service\n field: name\n") -// 7870 bytes -// MD5: 65e48c50eff3ec700e8c17fe56063e1a +// 7866 bytes +// MD5: 005b74102868fc0c06583dec36f0be29 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/key_ring.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/key_ring.go index 0c78821330..07bb5a2c98 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/key_ring.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms/key_ring.go @@ -143,10 +143,9 @@ func (c *Client) GetKeyRing(ctx context.Context, r *KeyRing) (*KeyRing, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder.go index 26c8222d14..c8220d0c5e 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder.go @@ -178,8 +178,7 @@ func (c *Client) GetFolder(ctx context.Context, r *Folder) (*Folder, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder_internal.go index 630609eb30..bbfe7869e7 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/folder_internal.go @@ -315,11 +315,8 @@ func (op *createFolderOperation) do(ctx context.Context, r *Folder, c *Client) e op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetFolder(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/project.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/project.go index 82dc69ed23..eee2a1c849 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/project.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/project.go @@ -172,8 +172,7 @@ func (c *Client) GetProject(ctx context.Context, r *Project) (*Project, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key.go index 5828392193..b82659496d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key.go @@ -131,8 +131,7 @@ func (c *Client) GetTagKey(ctx context.Context, r *TagKey) (*TagKey, error) { if err != nil 
{ return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key_internal.go index 3ba3436077..902b908ce2 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/beta/tag_key_internal.go @@ -276,11 +276,8 @@ func (op *createTagKeyOperation) do(ctx context.Context, r *TagKey, c *Client) e op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetTagKey(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder.go index 40f2d10699..08fcfad04f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder.go @@ -178,8 +178,7 @@ func (c *Client) GetFolder(ctx context.Context, r *Folder) (*Folder, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder_internal.go index 0ff3a1a8e3..e8d4d3e6da 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/folder_internal.go @@ -315,11 +315,8 @@ func (op *createFolderOperation) do(ctx context.Context, r *Folder, c *Client) e op.response, _ = 
o.FirstResponse() // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetFolder(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/project.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/project.go index 5861739d9e..b99af4558c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/project.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/project.go @@ -172,8 +172,7 @@ func (c *Client) GetProject(ctx context.Context, r *Project) (*Project, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key.go index 8ebf37682a..bdced4ffbd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key.go @@ -131,8 +131,7 @@ func (c *Client) GetTagKey(ctx context.Context, r 
*TagKey) (*TagKey, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key_internal.go index 5e5912a2fd..6bbd0ef201 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager/tag_key_internal.go @@ -276,11 +276,8 @@ func (op *createTagKeyOperation) do(ctx context.Context, r *TagKey, c *Client) e op.response, _ = o.FirstResponse() // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetTagKey(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudscheduler/job.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudscheduler/job.go index f3a9108fd7..8f2400ccab 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudscheduler/job.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudscheduler/job.go @@ -733,10 +733,9 @@ func (c *Client) GetJob(ctx context.Context, r *Job) (*Job, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy.go index 4eb19b7f95..d466350f73 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy.go @@ -154,8 +154,7 @@ func (c *Client) GetFirewallPolicy(ctx context.Context, r 
*FirewallPolicy) (*Fir if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_association.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_association.go index f258d0bd38..6bdfe2fdd9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_association.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_association.go @@ -142,9 +142,8 @@ func (c *Client) GetFirewallPolicyAssociation(ctx context.Context, r *FirewallPo if err != nil { return nil, err } - nr := r.urlNormalized() - result.FirewallPolicy = nr.FirewallPolicy - result.Name = nr.Name + result.FirewallPolicy = r.FirewallPolicy + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_rule.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_rule.go index 1490f8f7be..1bc15c0a83 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_rule.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/firewall_policy_rule.go @@ -288,9 +288,8 @@ func (c *Client) GetFirewallPolicyRule(ctx 
context.Context, r *FirewallPolicyRul if err != nil { return nil, err } - nr := r.urlNormalized() - result.FirewallPolicy = nr.FirewallPolicy - result.Priority = nr.Priority + result.FirewallPolicy = r.FirewallPolicy + result.Priority = r.Priority c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/forwarding_rule.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/forwarding_rule.go index e2e69871d0..7d04cc45f3 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/forwarding_rule.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/forwarding_rule.go @@ -518,10 +518,9 @@ func (c *Client) GetForwardingRule(ctx context.Context, r *ForwardingRule) (*For if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance.go index a6d835ca58..75a8b79ad1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance.go @@ -1083,10 
+1083,9 @@ func (c *Client) GetInstance(ctx context.Context, r *Instance) (*Instance, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Zone = nr.Zone - result.Name = nr.Name + result.Project = r.Project + result.Zone = r.Zone + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.go index a6119c244b..f31ec84bb4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.go @@ -1444,10 +1444,9 @@ func (c *Client) GetInstanceGroupManager(ctx context.Context, r *InstanceGroupMa if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.yaml index cb4a0dcee4..9abcff7432 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.yaml +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager.yaml @@ -368,11 +368,11 @@ components: serviceAccount: type: string x-dcl-go-name: ServiceAccount - description: 'The service account to be used as credentials for all operations + description: The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, - the service account: {projectNumber}@cloudservices.gserviceaccount.com - is used.' + the service account {projectNumber}@cloudservices.gserviceaccount.com + is used. x-dcl-references: - resource: Iam/ServiceAccount field: email diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_beta_yaml_embed.go index 15a88fdf90..ed8efde123 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/compute/beta/instance_group_manager.yaml -var YAML_instance_group_manager = []byte("info:\n title: Compute/InstanceGroupManager\n description: The Compute InstanceGroupManager resource\n x-dcl-struct-name: InstanceGroupManager\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a InstanceGroupManager\n parameters:\n - name: InstanceGroupManager\n required: true\n description: A full instance of a InstanceGroupManager\n apply:\n 
description: The function used to apply information about a InstanceGroupManager\n parameters:\n - name: InstanceGroupManager\n required: true\n description: A full instance of a InstanceGroupManager\n delete:\n description: The function used to delete a InstanceGroupManager\n parameters:\n - name: InstanceGroupManager\n required: true\n description: A full instance of a InstanceGroupManager\n deleteAll:\n description: The function used to delete all InstanceGroupManager\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many InstanceGroupManager\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n InstanceGroupManager:\n title: InstanceGroupManager\n x-dcl-locations:\n - zone\n - region\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - targetSize\n - project\n properties:\n autoHealingPolicies:\n type: array\n x-dcl-go-name: AutoHealingPolicies\n description: The autohealing policy for this managed instance group. You\n can specify only one value.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerAutoHealingPolicies\n properties:\n healthCheck:\n type: string\n x-dcl-go-name: HealthCheck\n description: The URL for the health check that signals autohealing.\n x-dcl-references:\n - resource: Compute/HealthCheck\n field: selfLink\n initialDelaySec:\n type: integer\n format: int64\n x-dcl-go-name: InitialDelaySec\n description: The number of seconds that the managed instance group\n waits before it applies autohealing policies to new instances or\n recently recreated instances. 
This initial delay allows instances\n to initialize and run their startup scripts before the instance\n group determines that they are UNHEALTHY. This prevents the managed\n instance group from recreating its instances prematurely. This value\n must be from range [0, 3600].\n baseInstanceName:\n type: string\n x-dcl-go-name: BaseInstanceName\n description: The base instance name to use for instances in this group.\n The value must be 1-58 characters long. Instances are named by appending\n a hyphen and a random four-character string to the base instance name.\n The base instance name must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n creationTimestamp:\n type: string\n x-dcl-go-name: CreationTimestamp\n readOnly: true\n description: The creation timestamp for this managed instance group in \\[RFC3339\\](https://www.ietf.org/rfc/rfc3339.txt)\n text format.\n x-kubernetes-immutable: true\n currentActions:\n type: object\n x-dcl-go-name: CurrentActions\n x-dcl-go-type: InstanceGroupManagerCurrentActions\n readOnly: true\n description: '[Output Only] The list of instance actions and the number\n of instances in this managed instance group that are scheduled for each\n of those actions.'\n x-kubernetes-immutable: true\n properties:\n abandoning:\n type: integer\n format: int64\n x-dcl-go-name: Abandoning\n readOnly: true\n description: '[Output Only] The total number of instances in the managed\n instance group that are scheduled to be abandoned. Abandoning an instance\n removes it from the managed instance group without deleting it.'\n x-kubernetes-immutable: true\n creating:\n type: integer\n format: int64\n x-dcl-go-name: Creating\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be created or are currently being created.\n If the group fails to create any of these instances, it tries again\n until it creates the instance successfully. 
If you have disabled creation\n retries, this field will not be populated; instead, the `creatingWithoutRetries`\n field will be populated.'\n x-kubernetes-immutable: true\n creatingWithoutRetries:\n type: integer\n format: int64\n x-dcl-go-name: CreatingWithoutRetries\n readOnly: true\n description: '[Output Only] The number of instances that the managed\n instance group will attempt to create. The group attempts to create\n each instance only once. If the group fails to create any of these\n instances, it decreases the group''s `targetSize` value accordingly.'\n x-kubernetes-immutable: true\n deleting:\n type: integer\n format: int64\n x-dcl-go-name: Deleting\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be deleted or are currently being deleted.'\n x-kubernetes-immutable: true\n none:\n type: integer\n format: int64\n x-dcl-go-name: None\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are running and have no scheduled actions.'\n x-kubernetes-immutable: true\n recreating:\n type: integer\n format: int64\n x-dcl-go-name: Recreating\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be recreated or are currently being being\n recreated. Recreating an instance deletes the existing root persistent\n disk and creates a new disk from the image that is defined in the\n instance template.'\n x-kubernetes-immutable: true\n refreshing:\n type: integer\n format: int64\n x-dcl-go-name: Refreshing\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are being reconfigured with properties that do not require\n a restart or a recreate action. 
For example, setting or removing target\n pools for the instance.'\n x-kubernetes-immutable: true\n restarting:\n type: integer\n format: int64\n x-dcl-go-name: Restarting\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be restarted or are currently being restarted.'\n x-kubernetes-immutable: true\n verifying:\n type: integer\n format: int64\n x-dcl-go-name: Verifying\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are being verified. See the `managedInstances[].currentAction`\n property in the `listManagedInstances` method documentation.'\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description of this resource.\n x-kubernetes-immutable: true\n distributionPolicy:\n type: object\n x-dcl-go-name: DistributionPolicy\n x-dcl-go-type: InstanceGroupManagerDistributionPolicy\n description: Policy specifying the intended distribution of managed instances\n across zones in a regional managed instance group.\n properties:\n targetShape:\n type: string\n x-dcl-go-name: TargetShape\n x-dcl-go-type: InstanceGroupManagerDistributionPolicyTargetShapeEnum\n description: 'The distribution shape to which the group converges either\n proactively or on resize events (depending on the value set in `updatePolicy.instanceRedistributionType`).\n Possible values: TARGET_SHAPE_UNSPECIFIED, ANY, BALANCED, ANY_SINGLE_ZONE'\n enum:\n - TARGET_SHAPE_UNSPECIFIED\n - ANY\n - BALANCED\n - ANY_SINGLE_ZONE\n zones:\n type: array\n x-dcl-go-name: Zones\n description: Zones where the regional managed instance group will create\n and manage its instances.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerDistributionPolicyZones\n properties:\n zone:\n type: string\n x-dcl-go-name: Zone\n description: The 
URL of the [zone](/compute/docs/regions-zones/#available).\n The zone must exist in the region where the managed instance\n group is located.\n x-kubernetes-immutable: true\n failoverAction:\n type: string\n x-dcl-go-name: FailoverAction\n x-dcl-go-type: InstanceGroupManagerFailoverActionEnum\n description: 'The action to perform in case of zone failure. Only one value\n is supported, `NO_FAILOVER`. The default is `NO_FAILOVER`. Possible values:\n UNKNOWN, NO_FAILOVER'\n enum:\n - UNKNOWN\n - NO_FAILOVER\n fingerprint:\n type: string\n x-dcl-go-name: Fingerprint\n readOnly: true\n description: Fingerprint of this resource. This field may be used in optimistic\n locking. It will be ignored when inserting an InstanceGroupManager. An\n up-to-date fingerprint must be provided in order to update the InstanceGroupManager,\n otherwise the request will fail with error `412 conditionNotMet`. To see\n the latest fingerprint, make a `get()` request to retrieve an InstanceGroupManager.\n x-kubernetes-immutable: true\n id:\n type: integer\n format: int64\n x-dcl-go-name: Id\n readOnly: true\n description: '[Output Only] A unique identifier for this resource type.\n The server generates this identifier.'\n x-kubernetes-immutable: true\n instanceGroup:\n type: string\n x-dcl-go-name: InstanceGroup\n readOnly: true\n description: '[Output Only] The URL of the Instance Group resource.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/InstanceGroup\n field: selfLink\n instanceTemplate:\n type: string\n x-dcl-go-name: InstanceTemplate\n description: The URL of the instance template that is specified for this\n managed instance group. The group uses this template to create all new\n instances in the managed instance group. 
The templates for existing instances\n in the group do not change unless you run `recreateInstances`, run `applyUpdatesToInstances`,\n or set the group's `updatePolicy.type` to `PROACTIVE`.\n x-dcl-conflicts:\n - versions\n x-dcl-server-default: true\n x-dcl-references:\n - resource: Compute/InstanceTemplate\n field: selfLink\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of this resource.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the managed instance group. The name must be 1-63\n characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n x-kubernetes-immutable: true\n namedPorts:\n type: array\n x-dcl-go-name: NamedPorts\n description: Named ports configured for the Instance Groups complementary\n to this Instance Group Manager.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerNamedPorts\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: The name for this named port. 
The name must be 1-63 characters\n long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n x-kubernetes-immutable: true\n port:\n type: integer\n format: int64\n x-dcl-go-name: Port\n description: The port number, which can be a value between 1 and 65535.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n region:\n type: string\n x-dcl-go-name: Region\n readOnly: true\n description: '[Output Only] The URL of the [region](/compute/docs/regions-zones/#available)\n where the managed instance group resides (for regional resources).'\n x-kubernetes-immutable: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: '[Output Only] The URL for this managed instance group. The\n server defines this URL.'\n x-kubernetes-immutable: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: 'The service account to be used as credentials for all operations\n performed by the managed instance group on instances. The service accounts\n needs all permissions required to create and delete instances. 
By default,\n the service account: {projectNumber}@cloudservices.gserviceaccount.com\n is used.'\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n statefulPolicy:\n type: object\n x-dcl-go-name: StatefulPolicy\n x-dcl-go-type: InstanceGroupManagerStatefulPolicy\n description: Stateful configuration for this Instanced Group Manager\n properties:\n preservedState:\n type: object\n x-dcl-go-name: PreservedState\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedState\n properties:\n disks:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateDisks\n properties:\n autoDelete:\n type: string\n x-dcl-go-name: AutoDelete\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum\n description: 'These stateful disks will never be deleted during\n autohealing, update or VM instance recreate operations.\n This flag is used to configure if the disk should be deleted\n after it is no longer used by the group, e.g. when the given\n instance or the whole group is deleted. Note: disks attached\n in READ_ONLY mode cannot be auto-deleted. Possible values:\n NEVER, ON_PERMANENT_INSTANCE_DELETION'\n enum:\n - NEVER\n - ON_PERMANENT_INSTANCE_DELETION\n x-dcl-go-name: Disks\n description: Disks created on the instances that will be preserved\n on instance delete, update, etc. 
This map is keyed with the device\n names of the disks.\n externalIps:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateExternalIps\n properties:\n autoDelete:\n type: string\n x-dcl-go-name: AutoDelete\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum\n description: 'These stateful IPs will never be released during\n autohealing, update or VM instance recreate operations.\n This flag is used to configure if the IP reservation should\n be deleted after it is no longer used by the group, e.g.\n when the given instance or the whole group is deleted. Possible\n values: NEVER, ON_PERMANENT_INSTANCE_DELETION'\n enum:\n - NEVER\n - ON_PERMANENT_INSTANCE_DELETION\n x-dcl-go-name: ExternalIps\n description: External network IPs assigned to the instances that\n will be preserved on instance delete, update, etc. This map is\n keyed with the network interface name.\n internalIps:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateInternalIps\n properties:\n autoDelete:\n type: string\n x-dcl-go-name: AutoDelete\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum\n description: 'These stateful IPs will never be released during\n autohealing, update or VM instance recreate operations.\n This flag is used to configure if the IP reservation should\n be deleted after it is no longer used by the group, e.g.\n when the given instance or the whole group is deleted. Possible\n values: NEVER, ON_PERMANENT_INSTANCE_DELETION'\n enum:\n - NEVER\n - ON_PERMANENT_INSTANCE_DELETION\n x-dcl-go-name: InternalIps\n description: Internal network IPs assigned to the instances that\n will be preserved on instance delete, update, etc. 
This map is\n keyed with the network interface name.\n status:\n type: object\n x-dcl-go-name: Status\n x-dcl-go-type: InstanceGroupManagerStatus\n readOnly: true\n description: '[Output Only] The status of this managed instance group.'\n properties:\n autoscaler:\n type: string\n x-dcl-go-name: Autoscaler\n readOnly: true\n description: '[Output Only] The URL of the [Autoscaler](/compute/docs/autoscaler/)\n that targets this instance group manager.'\n x-kubernetes-immutable: true\n isStable:\n type: boolean\n x-dcl-go-name: IsStable\n readOnly: true\n description: '[Output Only] A bit indicating whether the managed instance\n group is in a stable state. A stable state means that: none of the\n instances in the managed instance group is currently undergoing any\n type of change (for example, creation, restart, or deletion); no future\n changes are scheduled for instances in the managed instance group;\n and the managed instance group itself is not being modified.'\n x-kubernetes-immutable: true\n stateful:\n type: object\n x-dcl-go-name: Stateful\n x-dcl-go-type: InstanceGroupManagerStatusStateful\n readOnly: true\n description: '[Output Only] Stateful status of the given Instance Group\n Manager.'\n properties:\n hasStatefulConfig:\n type: boolean\n x-dcl-go-name: HasStatefulConfig\n readOnly: true\n description: '[Output Only] A bit indicating whether the managed\n instance group has stateful configuration, that is, if you have\n configured any items in a stateful policy or in per-instance configs.\n The group might report that it has no stateful config even when\n there is still some preserved state on a managed instance, for\n example, if you have deleted all PICs but not yet applied those\n deletions.'\n x-kubernetes-immutable: true\n isStateful:\n type: boolean\n x-dcl-go-name: IsStateful\n readOnly: true\n description: '[Output Only] A bit indicating whether the managed\n instance group has stateful configuration, that is, if you have\n configured any items 
in a stateful policy or in per-instance configs.\n The group might report that it has no stateful config even when\n there is still some preserved state on a managed instance, for\n example, if you have deleted all PICs but not yet applied those\n deletions. This field is deprecated in favor of has_stateful_config.'\n x-kubernetes-immutable: true\n perInstanceConfigs:\n type: object\n x-dcl-go-name: PerInstanceConfigs\n x-dcl-go-type: InstanceGroupManagerStatusStatefulPerInstanceConfigs\n readOnly: true\n description: '[Output Only] Status of per-instance configs on the\n instance.'\n properties:\n allEffective:\n type: boolean\n x-dcl-go-name: AllEffective\n description: A bit indicating if all of the group's per-instance\n configs (listed in the output of a listPerInstanceConfigs\n API call) have status `EFFECTIVE` or there are no per-instance-configs.\n versionTarget:\n type: object\n x-dcl-go-name: VersionTarget\n x-dcl-go-type: InstanceGroupManagerStatusVersionTarget\n readOnly: true\n description: '[Output Only] A status of consistency of Instances'' versions\n with their target version specified by `version` field on Instance\n Group Manager.'\n x-kubernetes-immutable: true\n properties:\n isReached:\n type: boolean\n x-dcl-go-name: IsReached\n readOnly: true\n description: '[Output Only] A bit indicating whether version target\n has been reached in this managed instance group, i.e. all instances\n are in their target version. Instances'' target version are specified\n by `version` field on Instance Group Manager.'\n x-kubernetes-immutable: true\n targetPools:\n type: array\n x-dcl-go-name: TargetPools\n description: The URLs for all TargetPool resources to which instances in\n the `instanceGroup` field are added. 
The target pools automatically apply\n to all of the instances in the managed instance group.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/TargetPool\n field: selfLink\n targetSize:\n type: integer\n format: int64\n x-dcl-go-name: TargetSize\n description: The target number of running instances for this managed instance\n group. You can reduce this number by using the instanceGroupManager deleteInstances\n or abandonInstances methods. Resizing the group also changes this number.\n updatePolicy:\n type: object\n x-dcl-go-name: UpdatePolicy\n x-dcl-go-type: InstanceGroupManagerUpdatePolicy\n description: The update policy for this managed instance group.\n properties:\n instanceRedistributionType:\n type: string\n x-dcl-go-name: InstanceRedistributionType\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum\n description: 'The [instance redistribution policy](/compute/docs/instance-groups/regional-migs#proactive_instance_redistribution)\n for regional managed instance groups. Valid values are: - `PROACTIVE`\n (default): The group attempts to maintain an even distribution of\n VM instances across zones in the region. - `NONE`: For non-autoscaled\n groups, proactive redistribution is disabled.'\n enum:\n - NONE\n - PROACTIVE\n maxSurge:\n type: object\n x-dcl-go-name: MaxSurge\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMaxSurge\n description: The maximum number of instances that can be created above\n the specified `targetSize` during the update process. This value can\n be either a fixed number or, if the group has 10 or more instances,\n a percentage. If you set a percentage, the number of instances is\n rounded if necessary. The default value for `maxSurge` is a fixed\n value equal to the number of zones in which the managed instance group\n operates. At least one of either `maxSurge` or `maxUnavailable` must\n be greater than 0. 
Learn more about [`maxSurge`](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#max_surge).\n x-dcl-send-empty: true\n properties:\n calculated:\n type: integer\n format: int64\n x-dcl-go-name: Calculated\n readOnly: true\n description: '[Output Only] Absolute value of VM instances calculated\n based on the specific mode. - If the value is `fixed`, then the\n `calculated` value is equal to the `fixed` value. - If the value\n is a `percent`, then the `calculated` value is `percent`/100 *\n `targetSize`. For example, the `calculated` value of a 80% of\n a managed instance group with 150 instances would be (80/100 *\n 150) = 120 VM instances. If there is a remainder, the number is\n rounded.'\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed number of VM instances. This must\n be a positive integer.\n x-dcl-send-empty: true\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies a percentage of instances between 0 to 100%,\n inclusive. For example, specify `80` for 80%.\n x-dcl-send-empty: true\n maxUnavailable:\n type: object\n x-dcl-go-name: MaxUnavailable\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMaxUnavailable\n description: 'The maximum number of instances that can be unavailable\n during the update process. An instance is considered available if\n all of the following conditions are satisfied: - The instance''s [status](/compute/docs/instances/checking-instance-status)\n is `RUNNING`. - If there is a [health check](/compute/docs/instance-groups/autohealing-instances-in-migs)\n on the instance group, the instance''s health check status must be\n `HEALTHY` at least once. If there is no health check on the group,\n then the instance only needs to have a status of `RUNNING` to be considered\n available. This value can be either a fixed number or, if the group\n has 10 or more instances, a percentage. 
If you set a percentage, the\n number of instances is rounded if necessary. The default value for\n `maxUnavailable` is a fixed value equal to the number of zones in\n which the managed instance group operates. At least one of either\n `maxSurge` or `maxUnavailable` must be greater than 0. Learn more\n about [`maxUnavailable`](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#max_unavailable).'\n properties:\n calculated:\n type: integer\n format: int64\n x-dcl-go-name: Calculated\n readOnly: true\n description: '[Output Only] Absolute value of VM instances calculated\n based on the specific mode. - If the value is `fixed`, then the\n `calculated` value is equal to the `fixed` value. - If the value\n is a `percent`, then the `calculated` value is `percent`/100 *\n `targetSize`. For example, the `calculated` value of a 80% of\n a managed instance group with 150 instances would be (80/100 *\n 150) = 120 VM instances. If there is a remainder, the number is\n rounded.'\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed number of VM instances. This must\n be a positive integer.\n x-dcl-send-empty: true\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies a percentage of instances between 0 to 100%,\n inclusive. For example, specify `80` for 80%.\n x-dcl-send-empty: true\n minReadySec:\n type: integer\n format: int64\n x-dcl-go-name: MinReadySec\n description: Minimum number of seconds to wait for after a newly created\n instance becomes available. This value must be from range [0, 3600].\n minimalAction:\n type: string\n x-dcl-go-name: MinimalAction\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMinimalActionEnum\n description: Minimal action to be taken on an instance. You can specify\n either `RESTART` to restart existing instances or `REPLACE` to delete\n and create new instances from the target template. 
If you specify\n a `RESTART`, the Updater will attempt to perform that action only.\n However, if the Updater determines that the minimal action you specify\n is not enough to perform the update, it might perform a more disruptive\n action.\n enum:\n - REPLACE\n - RESTART\n - REFRESH\n - NONE\n mostDisruptiveAllowedAction:\n type: string\n x-dcl-go-name: MostDisruptiveAllowedAction\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum\n description: Most disruptive action that is allowed to be taken on an\n instance. You can specify either `NONE` to forbid any actions, `REFRESH`\n to allow actions that do not need instance restart, `RESTART` to allow\n actions that can be applied without instance replacing or `REPLACE`\n to allow all possible actions. If the Updater determines that the\n minimal update action needed is more disruptive than most disruptive\n allowed action you specify it will not perform the update at all.\n enum:\n - REPLACE\n - RESTART\n - REFRESH\n - NONE\n replacementMethod:\n type: string\n x-dcl-go-name: ReplacementMethod\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyReplacementMethodEnum\n description: 'What action should be used to replace instances. See minimal_action.REPLACE\n Possible values: SUBSTITUTE, RECREATE'\n enum:\n - SUBSTITUTE\n - RECREATE\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyTypeEnum\n description: The type of update process. 
You can specify either `PROACTIVE`\n so that the instance group manager proactively executes actions in\n order to bring instances to their target versions or `OPPORTUNISTIC`\n so that no action is proactively executed but the update will be performed\n as part of other actions (for example, resizes or `recreateInstances`\n calls).\n enum:\n - OPPORTUNISTIC\n - PROACTIVE\n versions:\n type: array\n x-dcl-go-name: Versions\n description: Specifies the instance templates used by this managed instance\n group to create instances. Each version is defined by an `instanceTemplate`\n and a `name`. Every version can appear at most once per instance group.\n This field overrides the top-level `instanceTemplate` field. Read more\n about the [relationships between these fields](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#relationship_between_versions_and_instancetemplate_properties_for_a_managed_instance_group).\n Exactly one `version` must leave the `targetSize` field unset. That version\n will be applied to all remaining instances. For more information, read\n about [canary updates](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#starting_a_canary_update).\n x-dcl-conflicts:\n - instanceTemplate\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerVersions\n properties:\n instanceTemplate:\n type: string\n x-dcl-go-name: InstanceTemplate\n description: The URL of the instance template that is specified for\n this managed instance group. The group uses this template to create\n new instances in the managed instance group until the `targetSize`\n for this version is reached. 
The templates for existing instances\n in the group do not change unless you run `recreateInstances`, run\n `applyUpdatesToInstances`, or set the group's `updatePolicy.type`\n to `PROACTIVE`; in those cases, existing instances are updated until\n the `targetSize` for this version is reached.\n x-dcl-references:\n - resource: Compute/InstanceTemplate\n field: selfLink\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the version. Unique among all versions in the\n scope of this managed instance group.\n targetSize:\n type: object\n x-dcl-go-name: TargetSize\n x-dcl-go-type: InstanceGroupManagerVersionsTargetSize\n description: 'Specifies the intended number of instances to be created\n from the `instanceTemplate`. The final number of instances created\n from the template will be equal to: - If expressed as a fixed number,\n the minimum of either `targetSize.fixed` or `instanceGroupManager.targetSize`\n is used. - if expressed as a `percent`, the `targetSize` would be\n `(targetSize.percent/100 * InstanceGroupManager.targetSize)` If\n there is a remainder, the number is rounded. If unset, this version\n will update any remaining instances not updated by another `version`.\n Read [Starting a canary update](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#starting_a_canary_update)\n for more information.'\n properties:\n calculated:\n type: integer\n format: int64\n x-dcl-go-name: Calculated\n readOnly: true\n description: '[Output Only] Absolute value of VM instances calculated\n based on the specific mode. - If the value is `fixed`, then\n the `calculated` value is equal to the `fixed` value. - If the\n value is a `percent`, then the `calculated` value is `percent`/100\n * `targetSize`. For example, the `calculated` value of a 80%\n of a managed instance group with 150 instances would be (80/100\n * 150) = 120 VM instances. 
If there is a remainder, the number\n is rounded.'\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed number of VM instances. This must\n be a positive integer.\n x-dcl-send-empty: true\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies a percentage of instances between 0 to\n 100%, inclusive. For example, specify `80` for 80%.\n x-dcl-send-empty: true\n zone:\n type: string\n x-dcl-go-name: Zone\n readOnly: true\n description: '[Output Only] The URL of a [zone](/compute/docs/regions-zones/#available)\n where the managed instance group is located (for zonal resources).'\n x-kubernetes-immutable: true\n") +var YAML_instance_group_manager = []byte("info:\n title: Compute/InstanceGroupManager\n description: The Compute InstanceGroupManager resource\n x-dcl-struct-name: InstanceGroupManager\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a InstanceGroupManager\n parameters:\n - name: InstanceGroupManager\n required: true\n description: A full instance of a InstanceGroupManager\n apply:\n description: The function used to apply information about a InstanceGroupManager\n parameters:\n - name: InstanceGroupManager\n required: true\n description: A full instance of a InstanceGroupManager\n delete:\n description: The function used to delete a InstanceGroupManager\n parameters:\n - name: InstanceGroupManager\n required: true\n description: A full instance of a InstanceGroupManager\n deleteAll:\n description: The function used to delete all InstanceGroupManager\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many InstanceGroupManager\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n 
InstanceGroupManager:\n title: InstanceGroupManager\n x-dcl-locations:\n - zone\n - region\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - targetSize\n - project\n properties:\n autoHealingPolicies:\n type: array\n x-dcl-go-name: AutoHealingPolicies\n description: The autohealing policy for this managed instance group. You\n can specify only one value.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerAutoHealingPolicies\n properties:\n healthCheck:\n type: string\n x-dcl-go-name: HealthCheck\n description: The URL for the health check that signals autohealing.\n x-dcl-references:\n - resource: Compute/HealthCheck\n field: selfLink\n initialDelaySec:\n type: integer\n format: int64\n x-dcl-go-name: InitialDelaySec\n description: The number of seconds that the managed instance group\n waits before it applies autohealing policies to new instances or\n recently recreated instances. This initial delay allows instances\n to initialize and run their startup scripts before the instance\n group determines that they are UNHEALTHY. This prevents the managed\n instance group from recreating its instances prematurely. This value\n must be from range [0, 3600].\n baseInstanceName:\n type: string\n x-dcl-go-name: BaseInstanceName\n description: The base instance name to use for instances in this group.\n The value must be 1-58 characters long. 
Instances are named by appending\n a hyphen and a random four-character string to the base instance name.\n The base instance name must comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n creationTimestamp:\n type: string\n x-dcl-go-name: CreationTimestamp\n readOnly: true\n description: The creation timestamp for this managed instance group in \\[RFC3339\\](https://www.ietf.org/rfc/rfc3339.txt)\n text format.\n x-kubernetes-immutable: true\n currentActions:\n type: object\n x-dcl-go-name: CurrentActions\n x-dcl-go-type: InstanceGroupManagerCurrentActions\n readOnly: true\n description: '[Output Only] The list of instance actions and the number\n of instances in this managed instance group that are scheduled for each\n of those actions.'\n x-kubernetes-immutable: true\n properties:\n abandoning:\n type: integer\n format: int64\n x-dcl-go-name: Abandoning\n readOnly: true\n description: '[Output Only] The total number of instances in the managed\n instance group that are scheduled to be abandoned. Abandoning an instance\n removes it from the managed instance group without deleting it.'\n x-kubernetes-immutable: true\n creating:\n type: integer\n format: int64\n x-dcl-go-name: Creating\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be created or are currently being created.\n If the group fails to create any of these instances, it tries again\n until it creates the instance successfully. If you have disabled creation\n retries, this field will not be populated; instead, the `creatingWithoutRetries`\n field will be populated.'\n x-kubernetes-immutable: true\n creatingWithoutRetries:\n type: integer\n format: int64\n x-dcl-go-name: CreatingWithoutRetries\n readOnly: true\n description: '[Output Only] The number of instances that the managed\n instance group will attempt to create. The group attempts to create\n each instance only once. 
If the group fails to create any of these\n instances, it decreases the group''s `targetSize` value accordingly.'\n x-kubernetes-immutable: true\n deleting:\n type: integer\n format: int64\n x-dcl-go-name: Deleting\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be deleted or are currently being deleted.'\n x-kubernetes-immutable: true\n none:\n type: integer\n format: int64\n x-dcl-go-name: None\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are running and have no scheduled actions.'\n x-kubernetes-immutable: true\n recreating:\n type: integer\n format: int64\n x-dcl-go-name: Recreating\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be recreated or are currently being being\n recreated. Recreating an instance deletes the existing root persistent\n disk and creates a new disk from the image that is defined in the\n instance template.'\n x-kubernetes-immutable: true\n refreshing:\n type: integer\n format: int64\n x-dcl-go-name: Refreshing\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are being reconfigured with properties that do not require\n a restart or a recreate action. For example, setting or removing target\n pools for the instance.'\n x-kubernetes-immutable: true\n restarting:\n type: integer\n format: int64\n x-dcl-go-name: Restarting\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are scheduled to be restarted or are currently being restarted.'\n x-kubernetes-immutable: true\n verifying:\n type: integer\n format: int64\n x-dcl-go-name: Verifying\n readOnly: true\n description: '[Output Only] The number of instances in the managed instance\n group that are being verified. 
See the `managedInstances[].currentAction`\n property in the `listManagedInstances` method documentation.'\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description of this resource.\n x-kubernetes-immutable: true\n distributionPolicy:\n type: object\n x-dcl-go-name: DistributionPolicy\n x-dcl-go-type: InstanceGroupManagerDistributionPolicy\n description: Policy specifying the intended distribution of managed instances\n across zones in a regional managed instance group.\n properties:\n targetShape:\n type: string\n x-dcl-go-name: TargetShape\n x-dcl-go-type: InstanceGroupManagerDistributionPolicyTargetShapeEnum\n description: 'The distribution shape to which the group converges either\n proactively or on resize events (depending on the value set in `updatePolicy.instanceRedistributionType`).\n Possible values: TARGET_SHAPE_UNSPECIFIED, ANY, BALANCED, ANY_SINGLE_ZONE'\n enum:\n - TARGET_SHAPE_UNSPECIFIED\n - ANY\n - BALANCED\n - ANY_SINGLE_ZONE\n zones:\n type: array\n x-dcl-go-name: Zones\n description: Zones where the regional managed instance group will create\n and manage its instances.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerDistributionPolicyZones\n properties:\n zone:\n type: string\n x-dcl-go-name: Zone\n description: The URL of the [zone](/compute/docs/regions-zones/#available).\n The zone must exist in the region where the managed instance\n group is located.\n x-kubernetes-immutable: true\n failoverAction:\n type: string\n x-dcl-go-name: FailoverAction\n x-dcl-go-type: InstanceGroupManagerFailoverActionEnum\n description: 'The action to perform in case of zone failure. Only one value\n is supported, `NO_FAILOVER`. The default is `NO_FAILOVER`. 
Possible values:\n UNKNOWN, NO_FAILOVER'\n enum:\n - UNKNOWN\n - NO_FAILOVER\n fingerprint:\n type: string\n x-dcl-go-name: Fingerprint\n readOnly: true\n description: Fingerprint of this resource. This field may be used in optimistic\n locking. It will be ignored when inserting an InstanceGroupManager. An\n up-to-date fingerprint must be provided in order to update the InstanceGroupManager,\n otherwise the request will fail with error `412 conditionNotMet`. To see\n the latest fingerprint, make a `get()` request to retrieve an InstanceGroupManager.\n x-kubernetes-immutable: true\n id:\n type: integer\n format: int64\n x-dcl-go-name: Id\n readOnly: true\n description: '[Output Only] A unique identifier for this resource type.\n The server generates this identifier.'\n x-kubernetes-immutable: true\n instanceGroup:\n type: string\n x-dcl-go-name: InstanceGroup\n readOnly: true\n description: '[Output Only] The URL of the Instance Group resource.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/InstanceGroup\n field: selfLink\n instanceTemplate:\n type: string\n x-dcl-go-name: InstanceTemplate\n description: The URL of the instance template that is specified for this\n managed instance group. The group uses this template to create all new\n instances in the managed instance group. The templates for existing instances\n in the group do not change unless you run `recreateInstances`, run `applyUpdatesToInstances`,\n or set the group's `updatePolicy.type` to `PROACTIVE`.\n x-dcl-conflicts:\n - versions\n x-dcl-server-default: true\n x-dcl-references:\n - resource: Compute/InstanceTemplate\n field: selfLink\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of this resource.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the managed instance group. 
The name must be 1-63\n characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n x-kubernetes-immutable: true\n namedPorts:\n type: array\n x-dcl-go-name: NamedPorts\n description: Named ports configured for the Instance Groups complementary\n to this Instance Group Manager.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerNamedPorts\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: The name for this named port. The name must be 1-63 characters\n long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n x-kubernetes-immutable: true\n port:\n type: integer\n format: int64\n x-dcl-go-name: Port\n description: The port number, which can be a value between 1 and 65535.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n region:\n type: string\n x-dcl-go-name: Region\n readOnly: true\n description: '[Output Only] The URL of the [region](/compute/docs/regions-zones/#available)\n where the managed instance group resides (for regional resources).'\n x-kubernetes-immutable: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: '[Output Only] The URL for this managed instance group. The\n server defines this URL.'\n x-kubernetes-immutable: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: The service account to be used as credentials for all operations\n performed by the managed instance group on instances. The service accounts\n needs all permissions required to create and delete instances. 
By default,\n the service account {projectNumber}@cloudservices.gserviceaccount.com\n is used.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n statefulPolicy:\n type: object\n x-dcl-go-name: StatefulPolicy\n x-dcl-go-type: InstanceGroupManagerStatefulPolicy\n description: Stateful configuration for this Instanced Group Manager\n properties:\n preservedState:\n type: object\n x-dcl-go-name: PreservedState\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedState\n properties:\n disks:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateDisks\n properties:\n autoDelete:\n type: string\n x-dcl-go-name: AutoDelete\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum\n description: 'These stateful disks will never be deleted during\n autohealing, update or VM instance recreate operations.\n This flag is used to configure if the disk should be deleted\n after it is no longer used by the group, e.g. when the given\n instance or the whole group is deleted. Note: disks attached\n in READ_ONLY mode cannot be auto-deleted. Possible values:\n NEVER, ON_PERMANENT_INSTANCE_DELETION'\n enum:\n - NEVER\n - ON_PERMANENT_INSTANCE_DELETION\n x-dcl-go-name: Disks\n description: Disks created on the instances that will be preserved\n on instance delete, update, etc. 
This map is keyed with the device\n names of the disks.\n externalIps:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateExternalIps\n properties:\n autoDelete:\n type: string\n x-dcl-go-name: AutoDelete\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum\n description: 'These stateful IPs will never be released during\n autohealing, update or VM instance recreate operations.\n This flag is used to configure if the IP reservation should\n be deleted after it is no longer used by the group, e.g.\n when the given instance or the whole group is deleted. Possible\n values: NEVER, ON_PERMANENT_INSTANCE_DELETION'\n enum:\n - NEVER\n - ON_PERMANENT_INSTANCE_DELETION\n x-dcl-go-name: ExternalIps\n description: External network IPs assigned to the instances that\n will be preserved on instance delete, update, etc. This map is\n keyed with the network interface name.\n internalIps:\n type: object\n additionalProperties:\n type: object\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateInternalIps\n properties:\n autoDelete:\n type: string\n x-dcl-go-name: AutoDelete\n x-dcl-go-type: InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum\n description: 'These stateful IPs will never be released during\n autohealing, update or VM instance recreate operations.\n This flag is used to configure if the IP reservation should\n be deleted after it is no longer used by the group, e.g.\n when the given instance or the whole group is deleted. Possible\n values: NEVER, ON_PERMANENT_INSTANCE_DELETION'\n enum:\n - NEVER\n - ON_PERMANENT_INSTANCE_DELETION\n x-dcl-go-name: InternalIps\n description: Internal network IPs assigned to the instances that\n will be preserved on instance delete, update, etc. 
This map is\n keyed with the network interface name.\n status:\n type: object\n x-dcl-go-name: Status\n x-dcl-go-type: InstanceGroupManagerStatus\n readOnly: true\n description: '[Output Only] The status of this managed instance group.'\n properties:\n autoscaler:\n type: string\n x-dcl-go-name: Autoscaler\n readOnly: true\n description: '[Output Only] The URL of the [Autoscaler](/compute/docs/autoscaler/)\n that targets this instance group manager.'\n x-kubernetes-immutable: true\n isStable:\n type: boolean\n x-dcl-go-name: IsStable\n readOnly: true\n description: '[Output Only] A bit indicating whether the managed instance\n group is in a stable state. A stable state means that: none of the\n instances in the managed instance group is currently undergoing any\n type of change (for example, creation, restart, or deletion); no future\n changes are scheduled for instances in the managed instance group;\n and the managed instance group itself is not being modified.'\n x-kubernetes-immutable: true\n stateful:\n type: object\n x-dcl-go-name: Stateful\n x-dcl-go-type: InstanceGroupManagerStatusStateful\n readOnly: true\n description: '[Output Only] Stateful status of the given Instance Group\n Manager.'\n properties:\n hasStatefulConfig:\n type: boolean\n x-dcl-go-name: HasStatefulConfig\n readOnly: true\n description: '[Output Only] A bit indicating whether the managed\n instance group has stateful configuration, that is, if you have\n configured any items in a stateful policy or in per-instance configs.\n The group might report that it has no stateful config even when\n there is still some preserved state on a managed instance, for\n example, if you have deleted all PICs but not yet applied those\n deletions.'\n x-kubernetes-immutable: true\n isStateful:\n type: boolean\n x-dcl-go-name: IsStateful\n readOnly: true\n description: '[Output Only] A bit indicating whether the managed\n instance group has stateful configuration, that is, if you have\n configured any items 
in a stateful policy or in per-instance configs.\n The group might report that it has no stateful config even when\n there is still some preserved state on a managed instance, for\n example, if you have deleted all PICs but not yet applied those\n deletions. This field is deprecated in favor of has_stateful_config.'\n x-kubernetes-immutable: true\n perInstanceConfigs:\n type: object\n x-dcl-go-name: PerInstanceConfigs\n x-dcl-go-type: InstanceGroupManagerStatusStatefulPerInstanceConfigs\n readOnly: true\n description: '[Output Only] Status of per-instance configs on the\n instance.'\n properties:\n allEffective:\n type: boolean\n x-dcl-go-name: AllEffective\n description: A bit indicating if all of the group's per-instance\n configs (listed in the output of a listPerInstanceConfigs\n API call) have status `EFFECTIVE` or there are no per-instance-configs.\n versionTarget:\n type: object\n x-dcl-go-name: VersionTarget\n x-dcl-go-type: InstanceGroupManagerStatusVersionTarget\n readOnly: true\n description: '[Output Only] A status of consistency of Instances'' versions\n with their target version specified by `version` field on Instance\n Group Manager.'\n x-kubernetes-immutable: true\n properties:\n isReached:\n type: boolean\n x-dcl-go-name: IsReached\n readOnly: true\n description: '[Output Only] A bit indicating whether version target\n has been reached in this managed instance group, i.e. all instances\n are in their target version. Instances'' target version are specified\n by `version` field on Instance Group Manager.'\n x-kubernetes-immutable: true\n targetPools:\n type: array\n x-dcl-go-name: TargetPools\n description: The URLs for all TargetPool resources to which instances in\n the `instanceGroup` field are added. 
The target pools automatically apply\n to all of the instances in the managed instance group.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/TargetPool\n field: selfLink\n targetSize:\n type: integer\n format: int64\n x-dcl-go-name: TargetSize\n description: The target number of running instances for this managed instance\n group. You can reduce this number by using the instanceGroupManager deleteInstances\n or abandonInstances methods. Resizing the group also changes this number.\n updatePolicy:\n type: object\n x-dcl-go-name: UpdatePolicy\n x-dcl-go-type: InstanceGroupManagerUpdatePolicy\n description: The update policy for this managed instance group.\n properties:\n instanceRedistributionType:\n type: string\n x-dcl-go-name: InstanceRedistributionType\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum\n description: 'The [instance redistribution policy](/compute/docs/instance-groups/regional-migs#proactive_instance_redistribution)\n for regional managed instance groups. Valid values are: - `PROACTIVE`\n (default): The group attempts to maintain an even distribution of\n VM instances across zones in the region. - `NONE`: For non-autoscaled\n groups, proactive redistribution is disabled.'\n enum:\n - NONE\n - PROACTIVE\n maxSurge:\n type: object\n x-dcl-go-name: MaxSurge\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMaxSurge\n description: The maximum number of instances that can be created above\n the specified `targetSize` during the update process. This value can\n be either a fixed number or, if the group has 10 or more instances,\n a percentage. If you set a percentage, the number of instances is\n rounded if necessary. The default value for `maxSurge` is a fixed\n value equal to the number of zones in which the managed instance group\n operates. At least one of either `maxSurge` or `maxUnavailable` must\n be greater than 0. 
Learn more about [`maxSurge`](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#max_surge).\n x-dcl-send-empty: true\n properties:\n calculated:\n type: integer\n format: int64\n x-dcl-go-name: Calculated\n readOnly: true\n description: '[Output Only] Absolute value of VM instances calculated\n based on the specific mode. - If the value is `fixed`, then the\n `calculated` value is equal to the `fixed` value. - If the value\n is a `percent`, then the `calculated` value is `percent`/100 *\n `targetSize`. For example, the `calculated` value of a 80% of\n a managed instance group with 150 instances would be (80/100 *\n 150) = 120 VM instances. If there is a remainder, the number is\n rounded.'\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed number of VM instances. This must\n be a positive integer.\n x-dcl-send-empty: true\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies a percentage of instances between 0 to 100%,\n inclusive. For example, specify `80` for 80%.\n x-dcl-send-empty: true\n maxUnavailable:\n type: object\n x-dcl-go-name: MaxUnavailable\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMaxUnavailable\n description: 'The maximum number of instances that can be unavailable\n during the update process. An instance is considered available if\n all of the following conditions are satisfied: - The instance''s [status](/compute/docs/instances/checking-instance-status)\n is `RUNNING`. - If there is a [health check](/compute/docs/instance-groups/autohealing-instances-in-migs)\n on the instance group, the instance''s health check status must be\n `HEALTHY` at least once. If there is no health check on the group,\n then the instance only needs to have a status of `RUNNING` to be considered\n available. This value can be either a fixed number or, if the group\n has 10 or more instances, a percentage. 
If you set a percentage, the\n number of instances is rounded if necessary. The default value for\n `maxUnavailable` is a fixed value equal to the number of zones in\n which the managed instance group operates. At least one of either\n `maxSurge` or `maxUnavailable` must be greater than 0. Learn more\n about [`maxUnavailable`](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#max_unavailable).'\n properties:\n calculated:\n type: integer\n format: int64\n x-dcl-go-name: Calculated\n readOnly: true\n description: '[Output Only] Absolute value of VM instances calculated\n based on the specific mode. - If the value is `fixed`, then the\n `calculated` value is equal to the `fixed` value. - If the value\n is a `percent`, then the `calculated` value is `percent`/100 *\n `targetSize`. For example, the `calculated` value of a 80% of\n a managed instance group with 150 instances would be (80/100 *\n 150) = 120 VM instances. If there is a remainder, the number is\n rounded.'\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed number of VM instances. This must\n be a positive integer.\n x-dcl-send-empty: true\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies a percentage of instances between 0 to 100%,\n inclusive. For example, specify `80` for 80%.\n x-dcl-send-empty: true\n minReadySec:\n type: integer\n format: int64\n x-dcl-go-name: MinReadySec\n description: Minimum number of seconds to wait for after a newly created\n instance becomes available. This value must be from range [0, 3600].\n minimalAction:\n type: string\n x-dcl-go-name: MinimalAction\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMinimalActionEnum\n description: Minimal action to be taken on an instance. You can specify\n either `RESTART` to restart existing instances or `REPLACE` to delete\n and create new instances from the target template. 
If you specify\n a `RESTART`, the Updater will attempt to perform that action only.\n However, if the Updater determines that the minimal action you specify\n is not enough to perform the update, it might perform a more disruptive\n action.\n enum:\n - REPLACE\n - RESTART\n - REFRESH\n - NONE\n mostDisruptiveAllowedAction:\n type: string\n x-dcl-go-name: MostDisruptiveAllowedAction\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum\n description: Most disruptive action that is allowed to be taken on an\n instance. You can specify either `NONE` to forbid any actions, `REFRESH`\n to allow actions that do not need instance restart, `RESTART` to allow\n actions that can be applied without instance replacing or `REPLACE`\n to allow all possible actions. If the Updater determines that the\n minimal update action needed is more disruptive than most disruptive\n allowed action you specify it will not perform the update at all.\n enum:\n - REPLACE\n - RESTART\n - REFRESH\n - NONE\n replacementMethod:\n type: string\n x-dcl-go-name: ReplacementMethod\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyReplacementMethodEnum\n description: 'What action should be used to replace instances. See minimal_action.REPLACE\n Possible values: SUBSTITUTE, RECREATE'\n enum:\n - SUBSTITUTE\n - RECREATE\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: InstanceGroupManagerUpdatePolicyTypeEnum\n description: The type of update process. 
You can specify either `PROACTIVE`\n so that the instance group manager proactively executes actions in\n order to bring instances to their target versions or `OPPORTUNISTIC`\n so that no action is proactively executed but the update will be performed\n as part of other actions (for example, resizes or `recreateInstances`\n calls).\n enum:\n - OPPORTUNISTIC\n - PROACTIVE\n versions:\n type: array\n x-dcl-go-name: Versions\n description: Specifies the instance templates used by this managed instance\n group to create instances. Each version is defined by an `instanceTemplate`\n and a `name`. Every version can appear at most once per instance group.\n This field overrides the top-level `instanceTemplate` field. Read more\n about the [relationships between these fields](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#relationship_between_versions_and_instancetemplate_properties_for_a_managed_instance_group).\n Exactly one `version` must leave the `targetSize` field unset. That version\n will be applied to all remaining instances. For more information, read\n about [canary updates](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#starting_a_canary_update).\n x-dcl-conflicts:\n - instanceTemplate\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: InstanceGroupManagerVersions\n properties:\n instanceTemplate:\n type: string\n x-dcl-go-name: InstanceTemplate\n description: The URL of the instance template that is specified for\n this managed instance group. The group uses this template to create\n new instances in the managed instance group until the `targetSize`\n for this version is reached. 
The templates for existing instances\n in the group do not change unless you run `recreateInstances`, run\n `applyUpdatesToInstances`, or set the group's `updatePolicy.type`\n to `PROACTIVE`; in those cases, existing instances are updated until\n the `targetSize` for this version is reached.\n x-dcl-references:\n - resource: Compute/InstanceTemplate\n field: selfLink\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the version. Unique among all versions in the\n scope of this managed instance group.\n targetSize:\n type: object\n x-dcl-go-name: TargetSize\n x-dcl-go-type: InstanceGroupManagerVersionsTargetSize\n description: 'Specifies the intended number of instances to be created\n from the `instanceTemplate`. The final number of instances created\n from the template will be equal to: - If expressed as a fixed number,\n the minimum of either `targetSize.fixed` or `instanceGroupManager.targetSize`\n is used. - if expressed as a `percent`, the `targetSize` would be\n `(targetSize.percent/100 * InstanceGroupManager.targetSize)` If\n there is a remainder, the number is rounded. If unset, this version\n will update any remaining instances not updated by another `version`.\n Read [Starting a canary update](/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups#starting_a_canary_update)\n for more information.'\n properties:\n calculated:\n type: integer\n format: int64\n x-dcl-go-name: Calculated\n readOnly: true\n description: '[Output Only] Absolute value of VM instances calculated\n based on the specific mode. - If the value is `fixed`, then\n the `calculated` value is equal to the `fixed` value. - If the\n value is a `percent`, then the `calculated` value is `percent`/100\n * `targetSize`. For example, the `calculated` value of a 80%\n of a managed instance group with 150 instances would be (80/100\n * 150) = 120 VM instances. 
If there is a remainder, the number\n is rounded.'\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed number of VM instances. This must\n be a positive integer.\n x-dcl-send-empty: true\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies a percentage of instances between 0 to\n 100%, inclusive. For example, specify `80` for 80%.\n x-dcl-send-empty: true\n zone:\n type: string\n x-dcl-go-name: Zone\n readOnly: true\n description: '[Output Only] The URL of a [zone](/compute/docs/regions-zones/#available)\n where the managed instance group is located (for zonal resources).'\n x-kubernetes-immutable: true\n") -// 40804 bytes -// MD5: 8dbe43aa894395dcf08cecf0585dbca6 +// 40801 bytes +// MD5: ebb82828bbe70f5b10f2d377cd3a7562 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_schema.go index d6fdb84593..7dce14c72d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/instance_group_manager_schema.go @@ -403,7 +403,7 @@ func DCLInstanceGroupManagerSchema() *dcl.Schema { "serviceAccount": &dcl.Property{ Type: "string", GoName: "ServiceAccount", - Description: "The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. 
By default, the service account: {projectNumber}@cloudservices.gserviceaccount.com is used.", + Description: "The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used.", ResourceReferences: []*dcl.PropertyResourceReference{ &dcl.PropertyResourceReference{ Resource: "Iam/ServiceAccount", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/interconnect_attachment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/interconnect_attachment.go index 9aa6b0bed2..38027d9d93 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/interconnect_attachment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/interconnect_attachment.go @@ -455,10 +455,9 @@ func (c *Client) GetInterconnectAttachment(ctx context.Context, r *InterconnectA if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Region = nr.Region - result.Name = nr.Name + result.Project = r.Project + result.Region = r.Region + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network.go index 60d1ba8257..ccfb69cb49 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network.go @@ -227,9 +227,8 @@ func (c *Client) GetNetwork(ctx context.Context, r *Network) (*Network, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name if dcl.IsZeroValue(result.AutoCreateSubnetworks) { result.AutoCreateSubnetworks = dcl.Bool(true) } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network_endpoint_group.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network_endpoint_group.yaml index 324f065494..0dd2c6da52 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network_endpoint_group.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/network_endpoint_group.yaml @@ -103,14 +103,14 @@ components: urlMask: type: string x-dcl-go-name: UrlMask - description: 'A template to parse `service` and `version` fields from + description: A template to parse `service` and `version` fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "`foo1-dot-appname.appspot.com/v1`" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "`-dot-appname.appspot.com/`". The URL mask will - parse them to: { service = "foo1", version = "v1" } and { service - = "foo1", version = "v2" } respectively.' + parse them to { service = "foo1", version = "v1" } and { service = + "foo1", version = "v2" } respectively. 
x-kubernetes-immutable: true version: type: string @@ -146,13 +146,13 @@ components: urlMask: type: string x-dcl-go-name: UrlMask - description: 'A template to parse `function` field from a request URL. + description: A template to parse `function` field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs "`mydomain.com/function1`" and "`mydomain.com/function2`" can be backed by the same Serverless NEG with URL mask "`/`". The - URL mask will parse them to: { function = "function1" } and { function - = "function2" } respectively.' + URL mask will parse them to { function = "function1" } and { function + = "function2" } respectively. x-kubernetes-immutable: true cloudRun: type: object @@ -180,13 +180,13 @@ components: urlMask: type: string x-dcl-go-name: UrlMask - description: 'A template to parse `service` and `tag` fields from a - request URL. URL mask allows for routing to multiple Run services - without having to create multiple network endpoint groups and backend - services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" + description: A template to parse `service` and `tag` fields from a request + URL. URL mask allows for routing to multiple Run services without + having to create multiple network endpoint groups and backend services. + For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) - with URL mask "`.domain.com/`". The URL mask will parse them to: { - service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively.' + with URL mask "`.domain.com/`". The URL mask will parse them to { + service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. 
x-kubernetes-immutable: true defaultPort: type: integer diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/packet_mirroring.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/packet_mirroring.go index 8a993b09a0..553206fe12 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/packet_mirroring.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/packet_mirroring.go @@ -517,10 +517,9 @@ func (c *Client) GetPacketMirroring(ctx context.Context, r *PacketMirroring) (*P if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.go index 9903627a76..6f76b9289d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.go @@ -249,9 +249,8 @@ func (c *Client) GetRoute(ctx context.Context, r *Route) (*Route, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name if dcl.IsZeroValue(result.Priority) { result.Priority = dcl.Int64(1000) } diff --git 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.yaml index 5056edaeed..6b4e44255b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route.yaml @@ -274,7 +274,7 @@ components: [Output Only] Metadata about this warning in key: value format. For example:
"data": [
-                   : {
+                    {
                      "key": "scope",
                      "value": "zones/us-east1-d"
                     }
diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_beta_yaml_embed.go index c159f6f31b..9679714f43 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/compute/beta/route.yaml -var YAML_route = []byte("info:\n title: Compute/Route\n description: The Compute Route resource\n x-dcl-struct-name: Route\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Route\n parameters:\n - name: Route\n required: true\n description: A full instance of a Route\n apply:\n description: The function used to apply information about a Route\n parameters:\n - name: Route\n required: true\n description: A full instance of a Route\n delete:\n description: The function used to delete a Route\n parameters:\n - name: Route\n required: true\n description: A full instance of a Route\n deleteAll:\n description: The function used to delete all Route\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Route\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Route:\n title: Route\n x-dcl-id: projects/{{project}}/global/routes/{{name}}\n x-dcl-locations:\n - global\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - network\n - destRange\n - project\n 
properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: |-\n An optional description of this resource. Provide this field when you\n create the resource.\n x-kubernetes-immutable: true\n destRange:\n type: string\n x-dcl-go-name: DestRange\n description: The destination range of the route.\n x-kubernetes-immutable: true\n id:\n type: integer\n format: int64\n x-dcl-go-name: Id\n readOnly: true\n description: |-\n [Output Only] The unique identifier for the resource. This identifier is\n defined by the server.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: |-\n Name of the resource. Provided by the client when the resource is created.\n The name must be 1-63 characters long, and comply with\n RFC1035.\n Specifically, the name must be 1-63 characters long and match the regular\n expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a\n lowercase letter, and all following characters (except for the last\n character) must be a dash, lowercase letter, or digit. The last character\n must be a lowercase letter or digit.\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: Fully-qualified URL of the network that this route applies\n to.\n x-kubernetes-immutable: true\n nextHopGateway:\n type: string\n x-dcl-go-name: NextHopGateway\n description: |-\n The URL to a gateway that should handle matching packets.\n You can only specify the internet gateway using a full or\n partial valid URL:
\n projects/project/global/gateways/default-internet-gateway\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopIP\n - nextHopInstance\n - nextHopIlb\n nextHopIP:\n type: string\n x-dcl-go-name: NextHopIP\n description: |-\n The network IP address of an instance that should handle matching packets.\n Only IPv4 is supported.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopInstance\n - nextHopGateway\n - nextHopIlb\n x-dcl-server-default: true\n nextHopIlb:\n type: string\n x-dcl-go-name: NextHopIlb\n description: |-\n The URL to a forwarding rule of type\n loadBalancingScheme=INTERNAL that should handle matching\n packets. You can only specify the forwarding rule as a partial or full\n URL. For example, the following are all valid URLs:\n
    \n
  • https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule
  • regions/region/forwardingRules/forwardingRule
  • \n
\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopIP\n - nextHopInstance\n - nextHopGateway\n nextHopInstance:\n type: string\n x-dcl-go-name: NextHopInstance\n description: |-\n The URL to an instance that should handle matching packets. You can specify\n this as a full or partial URL.\n For example:
\n https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopIP\n - nextHopGateway\n - nextHopIlb\n nextHopNetwork:\n type: string\n x-dcl-go-name: NextHopNetwork\n readOnly: true\n description: The URL of the local network if it should handle matching packets.\n x-kubernetes-immutable: true\n nextHopPeering:\n type: string\n x-dcl-go-name: NextHopPeering\n readOnly: true\n description: |-\n [Output Only] The network peering name that should handle matching packets,\n which should conform to RFC1035.\n x-kubernetes-immutable: true\n nextHopVpnTunnel:\n type: string\n x-dcl-go-name: NextHopVpnTunnel\n description: The URL to a VpnTunnel that should handle matching packets.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopIP\n - nextHopInstance\n - nextHopGateway\n - nextHopIlb\n priority:\n type: integer\n format: int64\n x-dcl-go-name: Priority\n description: The priority of the peering route.\n x-kubernetes-immutable: true\n default: 1000\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: '[Output Only] Server-defined fully-qualified URL for this\n resource.'\n x-kubernetes-immutable: true\n tag:\n type: array\n x-dcl-go-name: Tag\n description: A list of instance tags to which this route applies.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n warning:\n type: array\n x-dcl-go-name: Warning\n readOnly: true\n description: |-\n [Output Only] If potential misconfigurations are detected for this\n route, this field will be populated with warning messages.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: 
object\n x-dcl-go-type: RouteWarning\n properties:\n code:\n type: string\n x-dcl-go-name: Code\n x-dcl-go-type: RouteWarningCodeEnum\n readOnly: true\n description: |-\n [Output Only] A warning code, if applicable. For example, Compute\n Engine returns NO_RESULTS_ON_PAGE if there\n are no results in the response. Possible values: BAD_REQUEST, FORBIDDEN, NOT_FOUND, CONFLICT, GONE, PRECONDITION_FAILED, INTERNAL_ERROR, SERVICE_UNAVAILABLE\n x-kubernetes-immutable: true\n enum:\n - BAD_REQUEST\n - FORBIDDEN\n - NOT_FOUND\n - CONFLICT\n - GONE\n - PRECONDITION_FAILED\n - INTERNAL_ERROR\n - SERVICE_UNAVAILABLE\n data:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Data\n readOnly: true\n description: |-\n [Output Only] Metadata about this warning in key:\n value format. For example:\n
\"data\": [\n                   : {\n                     \"key\": \"scope\",\n                     \"value\": \"zones/us-east1-d\"\n                    }
\n x-kubernetes-immutable: true\n message:\n type: string\n x-dcl-go-name: Message\n readOnly: true\n description: '[Output Only] A human-readable description of the warning\n code.'\n x-kubernetes-immutable: true\n") +var YAML_route = []byte("info:\n title: Compute/Route\n description: The Compute Route resource\n x-dcl-struct-name: Route\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Route\n parameters:\n - name: Route\n required: true\n description: A full instance of a Route\n apply:\n description: The function used to apply information about a Route\n parameters:\n - name: Route\n required: true\n description: A full instance of a Route\n delete:\n description: The function used to delete a Route\n parameters:\n - name: Route\n required: true\n description: A full instance of a Route\n deleteAll:\n description: The function used to delete all Route\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Route\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Route:\n title: Route\n x-dcl-id: projects/{{project}}/global/routes/{{name}}\n x-dcl-locations:\n - global\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - network\n - destRange\n - project\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: |-\n An optional description of this resource. 
Provide this field when you\n create the resource.\n x-kubernetes-immutable: true\n destRange:\n type: string\n x-dcl-go-name: DestRange\n description: The destination range of the route.\n x-kubernetes-immutable: true\n id:\n type: integer\n format: int64\n x-dcl-go-name: Id\n readOnly: true\n description: |-\n [Output Only] The unique identifier for the resource. This identifier is\n defined by the server.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: |-\n Name of the resource. Provided by the client when the resource is created.\n The name must be 1-63 characters long, and comply with\n RFC1035.\n Specifically, the name must be 1-63 characters long and match the regular\n expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a\n lowercase letter, and all following characters (except for the last\n character) must be a dash, lowercase letter, or digit. The last character\n must be a lowercase letter or digit.\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: Fully-qualified URL of the network that this route applies\n to.\n x-kubernetes-immutable: true\n nextHopGateway:\n type: string\n x-dcl-go-name: NextHopGateway\n description: |-\n The URL to a gateway that should handle matching packets.\n You can only specify the internet gateway using a full or\n partial valid URL:
\n projects/project/global/gateways/default-internet-gateway\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopIP\n - nextHopInstance\n - nextHopIlb\n nextHopIP:\n type: string\n x-dcl-go-name: NextHopIP\n description: |-\n The network IP address of an instance that should handle matching packets.\n Only IPv4 is supported.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopInstance\n - nextHopGateway\n - nextHopIlb\n x-dcl-server-default: true\n nextHopIlb:\n type: string\n x-dcl-go-name: NextHopIlb\n description: |-\n The URL to a forwarding rule of type\n loadBalancingScheme=INTERNAL that should handle matching\n packets. You can only specify the forwarding rule as a partial or full\n URL. For example, the following are all valid URLs:\n
    \n
  • https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule
  • regions/region/forwardingRules/forwardingRule
  • \n
\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopIP\n - nextHopInstance\n - nextHopGateway\n nextHopInstance:\n type: string\n x-dcl-go-name: NextHopInstance\n description: |-\n The URL to an instance that should handle matching packets. You can specify\n this as a full or partial URL.\n For example:
\n https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopVpnTunnel\n - nextHopIP\n - nextHopGateway\n - nextHopIlb\n nextHopNetwork:\n type: string\n x-dcl-go-name: NextHopNetwork\n readOnly: true\n description: The URL of the local network if it should handle matching packets.\n x-kubernetes-immutable: true\n nextHopPeering:\n type: string\n x-dcl-go-name: NextHopPeering\n readOnly: true\n description: |-\n [Output Only] The network peering name that should handle matching packets,\n which should conform to RFC1035.\n x-kubernetes-immutable: true\n nextHopVpnTunnel:\n type: string\n x-dcl-go-name: NextHopVpnTunnel\n description: The URL to a VpnTunnel that should handle matching packets.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - nextHopIP\n - nextHopInstance\n - nextHopGateway\n - nextHopIlb\n priority:\n type: integer\n format: int64\n x-dcl-go-name: Priority\n description: The priority of the peering route.\n x-kubernetes-immutable: true\n default: 1000\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: '[Output Only] Server-defined fully-qualified URL for this\n resource.'\n x-kubernetes-immutable: true\n tag:\n type: array\n x-dcl-go-name: Tag\n description: A list of instance tags to which this route applies.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n warning:\n type: array\n x-dcl-go-name: Warning\n readOnly: true\n description: |-\n [Output Only] If potential misconfigurations are detected for this\n route, this field will be populated with warning messages.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: 
object\n x-dcl-go-type: RouteWarning\n properties:\n code:\n type: string\n x-dcl-go-name: Code\n x-dcl-go-type: RouteWarningCodeEnum\n readOnly: true\n description: |-\n [Output Only] A warning code, if applicable. For example, Compute\n Engine returns NO_RESULTS_ON_PAGE if there\n are no results in the response. Possible values: BAD_REQUEST, FORBIDDEN, NOT_FOUND, CONFLICT, GONE, PRECONDITION_FAILED, INTERNAL_ERROR, SERVICE_UNAVAILABLE\n x-kubernetes-immutable: true\n enum:\n - BAD_REQUEST\n - FORBIDDEN\n - NOT_FOUND\n - CONFLICT\n - GONE\n - PRECONDITION_FAILED\n - INTERNAL_ERROR\n - SERVICE_UNAVAILABLE\n data:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Data\n readOnly: true\n description: |-\n [Output Only] Metadata about this warning in key:\n value format. For example:\n
\"data\": [\n                    {\n                     \"key\": \"scope\",\n                     \"value\": \"zones/us-east1-d\"\n                    }
\n x-kubernetes-immutable: true\n message:\n type: string\n x-dcl-go-name: Message\n readOnly: true\n description: '[Output Only] A human-readable description of the warning\n code.'\n x-kubernetes-immutable: true\n") -// 9949 bytes -// MD5: 87071f08b55d6bf586782a7be110a285 +// 9948 bytes +// MD5: 7ee7530ff89e58c7c641decca6d88e4a diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_schema.go index 52d5a0ef7b..bc7340cc10 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/route_schema.go @@ -282,7 +282,7 @@ func DCLRouteSchema() *dcl.Schema { }, GoName: "Data", ReadOnly: true, - Description: "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n
\"data\": [\n : {\n   \"key\": \"scope\",\n   \"value\": \"zones/us-east1-d\"\n  }
", + Description: "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n
\"data\": [\n  {\n   \"key\": \"scope\",\n   \"value\": \"zones/us-east1-d\"\n  }
", Immutable: true, }, "message": &dcl.Property{ diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/service_attachment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/service_attachment.go index 46b8bee9c2..aa8914aa00 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/service_attachment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/service_attachment.go @@ -373,10 +373,9 @@ func (c *Client) GetServiceAttachment(ctx context.Context, r *ServiceAttachment) if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/subnetwork.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/subnetwork.go index 591031e4e3..31a00848a3 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/subnetwork.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/subnetwork.go @@ -392,10 +392,9 @@ func (c *Client) GetSubnetwork(ctx context.Context, r *Subnetwork) (*Subnetwork, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Region = nr.Region - result.Name = nr.Name + result.Project = r.Project + result.Region = r.Region + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, 
"Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/vpn_tunnel.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/vpn_tunnel.go index de00fcf85c..8d7f2f0283 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/vpn_tunnel.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta/vpn_tunnel.go @@ -206,10 +206,9 @@ func (c *Client) GetVpnTunnel(ctx context.Context, r *VpnTunnel) (*VpnTunnel, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Region = nr.Region - result.Name = nr.Name + result.Project = r.Project + result.Region = r.Region + result.Name = r.Name if dcl.IsZeroValue(result.IkeVersion) { result.IkeVersion = dcl.Int64(2) } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/configcontroller/alpha/instance.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/configcontroller/alpha/instance.go index fd8decf8ac..dbd825f90b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/configcontroller/alpha/instance.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/configcontroller/alpha/instance.go @@ -385,10 +385,9 @@ func (c *Client) GetInstance(ctx context.Context, r *Instance) (*Instance, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved 
raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeranalysis/note.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeranalysis/note.go index f7b804f5e2..2fc2b42599 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeranalysis/note.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeranalysis/note.go @@ -1615,9 +1615,8 @@ func (c *Client) GetNote(ctx context.Context, r *Note) (*Note, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/cluster.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/cluster.go index 1dc56842f8..bd0f92998c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/cluster.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/cluster.go @@ -1124,10 +1124,9 @@ func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result 
state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.go index 21f756e1fe..65a048ab7c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.go @@ -746,11 +746,10 @@ func (c *Client) GetNodePool(ctx context.Context, r *NodePool) (*NodePool, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Cluster = nr.Cluster - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Cluster = r.Cluster + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.yaml index dc7aebd6a0..e9e4facf1a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool.yaml @@ -201,7 +201,7 @@ components: type: string x-dcl-go-name: Labels description: 'Optional. The initial labels assigned to nodes of this - node pool. An object containing a list of "key": value pairs. Example: + node pool. 
An object containing a list of "key": value pairs. Example { "name": "wrench", "mass": "1.3kg", "count": "3" }.' x-kubernetes-immutable: true proxyConfig: diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_beta_yaml_embed.go index 85555073d7..aa8c91d3c8 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/containeraws/beta/node_pool.yaml -var YAML_node_pool = []byte("info:\n title: ContainerAws/NodePool\n description: An Anthos node pool running on AWS.\n x-dcl-struct-name: NodePool\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud/reference/rest/v1/projects.locations.awsClusters.awsNodePools\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud\npaths:\n get:\n description: The function used to get information about a NodePool\n parameters:\n - name: NodePool\n required: true\n description: A full instance of a NodePool\n apply:\n description: The function used to apply information about a NodePool\n parameters:\n - name: NodePool\n required: true\n description: A full instance of a NodePool\n delete:\n description: The function used to delete a NodePool\n parameters:\n - name: NodePool\n required: true\n description: A full instance of a NodePool\n deleteAll:\n description: The function used to delete all NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: 
string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NodePool:\n title: NodePool\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - version\n - config\n - autoscaling\n - subnetId\n - maxPodsConstraint\n - project\n - location\n - cluster\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the node pool. This field has the\n same restrictions as Kubernetes annotations. The total size of all keys\n and values combined is limited to 256k. Key can have 2 segments: prefix\n (optional) and name (required), separated by a slash (/). Prefix must\n be a DNS subdomain. Name must be 63 characters or less, begin and end\n with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics\n between.'\n autoscaling:\n type: object\n x-dcl-go-name: Autoscaling\n x-dcl-go-type: NodePoolAutoscaling\n description: Autoscaler configuration for this node pool.\n required:\n - minNodeCount\n - maxNodeCount\n properties:\n maxNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxNodeCount\n description: Maximum number of nodes in the NodePool. Must be >= min_node_count.\n minNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MinNodeCount\n description: Minimum number of nodes in the NodePool. 
Must be >= 1 and\n <= max_node_count.\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: The awsCluster for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkemulticloud/Cluster\n field: name\n parent: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: NodePoolConfig\n description: The configuration of the node pool.\n required:\n - iamInstanceProfile\n - configEncryption\n properties:\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: NodePoolConfigConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt node pool configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt node pool\n configuration.\n iamInstanceProfile:\n type: string\n x-dcl-go-name: IamInstanceProfile\n description: The name of the AWS IAM role assigned to nodes in the pool.\n imageType:\n type: string\n x-dcl-go-name: ImageType\n description: The OS image type to use on node pool instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n instancePlacement:\n type: object\n x-dcl-go-name: InstancePlacement\n x-dcl-go-type: NodePoolConfigInstancePlacement\n description: Details of placement information for an instance.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n tenancy:\n type: string\n x-dcl-go-name: Tenancy\n x-dcl-go-type: NodePoolConfigInstancePlacementTenancyEnum\n description: 'The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED,\n DEFAULT, DEDICATED, HOST'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - TENANCY_UNSPECIFIED\n - DEFAULT\n - DEDICATED\n - HOST\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. 
When unspecified, it defaults\n to `m5.large`.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The initial labels assigned to nodes of this\n node pool. An object containing a list of \"key\": value pairs. Example:\n { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.'\n x-kubernetes-immutable: true\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: NodePoolConfigProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: NodePoolConfigRootVolume\n description: Optional. Template for the root volume provisioned for\n node pool nodes. Volumes will be provisioned in the availability zone\n assigned to the node pool subnet. When unspecified, it defaults to\n 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. 
The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: NodePoolConfigRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to nodes in this pool. The manager will automatically create security\n groups with minimum rules needed for a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: NodePoolConfigSshConfig\n description: Optional. The SSH configuration.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. Key/value metadata to assign to each underlying\n AWS resource. Specify at most 50 pairs containing alphanumerics, spaces,\n and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters.\n Values can be up to 255 Unicode characters.\n x-kubernetes-immutable: true\n taints:\n type: array\n x-dcl-go-name: Taints\n description: Optional. 
The initial taints assigned to nodes of this\n node pool.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NodePoolConfigTaints\n required:\n - key\n - value\n - effect\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n x-dcl-go-type: NodePoolConfigTaintsEffectEnum\n description: 'The taint effect. Possible values: EFFECT_UNSPECIFIED,\n NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE'\n x-kubernetes-immutable: true\n enum:\n - EFFECT_UNSPECIFIED\n - NO_SCHEDULE\n - PREFER_NO_SCHEDULE\n - NO_EXECUTE\n key:\n type: string\n x-dcl-go-name: Key\n description: Key for the taint.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: Value for the taint.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this node pool was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. 
May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n maxPodsConstraint:\n type: object\n x-dcl-go-name: MaxPodsConstraint\n x-dcl-go-type: NodePoolMaxPodsConstraint\n description: The constraint on the maximum number of pods that can be run\n simultaneously on a node in the node pool.\n x-kubernetes-immutable: true\n required:\n - maxPodsPerNode\n properties:\n maxPodsPerNode:\n type: integer\n format: int64\n x-dcl-go-name: MaxPodsPerNode\n description: The maximum number of pods to schedule on a single node.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. If set, there are currently changes in flight\n to the node pool.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NodePoolStateEnum\n readOnly: true\n description: 'Output only. The lifecycle state of the node pool. Possible\n values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING,\n ERROR, DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n subnetId:\n type: string\n x-dcl-go-name: SubnetId\n description: The subnet where the node pool node run.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. 
A globally unique identifier for the node pool.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this node pool was last updated.\n x-kubernetes-immutable: true\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`).\n You can list all supported versions on a given Google Cloud region by\n calling GetAwsServerConfig.\n") +var YAML_node_pool = []byte("info:\n title: ContainerAws/NodePool\n description: An Anthos node pool running on AWS.\n x-dcl-struct-name: NodePool\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud/reference/rest/v1/projects.locations.awsClusters.awsNodePools\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud\npaths:\n get:\n description: The function used to get information about a NodePool\n parameters:\n - name: NodePool\n required: true\n description: A full instance of a NodePool\n apply:\n description: The function used to apply information about a NodePool\n parameters:\n - name: NodePool\n required: true\n description: A full instance of a NodePool\n delete:\n description: The function used to delete a NodePool\n parameters:\n - name: NodePool\n required: true\n description: A full instance of a NodePool\n deleteAll:\n description: The function used to delete all NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: 
true\n schema:\n type: string\ncomponents:\n schemas:\n NodePool:\n title: NodePool\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - version\n - config\n - autoscaling\n - subnetId\n - maxPodsConstraint\n - project\n - location\n - cluster\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the node pool. This field has the\n same restrictions as Kubernetes annotations. The total size of all keys\n and values combined is limited to 256k. Key can have 2 segments: prefix\n (optional) and name (required), separated by a slash (/). Prefix must\n be a DNS subdomain. Name must be 63 characters or less, begin and end\n with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics\n between.'\n autoscaling:\n type: object\n x-dcl-go-name: Autoscaling\n x-dcl-go-type: NodePoolAutoscaling\n description: Autoscaler configuration for this node pool.\n required:\n - minNodeCount\n - maxNodeCount\n properties:\n maxNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxNodeCount\n description: Maximum number of nodes in the NodePool. Must be >= min_node_count.\n minNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MinNodeCount\n description: Minimum number of nodes in the NodePool. 
Must be >= 1 and\n <= max_node_count.\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: The awsCluster for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkemulticloud/Cluster\n field: name\n parent: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: NodePoolConfig\n description: The configuration of the node pool.\n required:\n - iamInstanceProfile\n - configEncryption\n properties:\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: NodePoolConfigConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt node pool configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt node pool\n configuration.\n iamInstanceProfile:\n type: string\n x-dcl-go-name: IamInstanceProfile\n description: The name of the AWS IAM role assigned to nodes in the pool.\n imageType:\n type: string\n x-dcl-go-name: ImageType\n description: The OS image type to use on node pool instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n instancePlacement:\n type: object\n x-dcl-go-name: InstancePlacement\n x-dcl-go-type: NodePoolConfigInstancePlacement\n description: Details of placement information for an instance.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n tenancy:\n type: string\n x-dcl-go-name: Tenancy\n x-dcl-go-type: NodePoolConfigInstancePlacementTenancyEnum\n description: 'The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED,\n DEFAULT, DEDICATED, HOST'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - TENANCY_UNSPECIFIED\n - DEFAULT\n - DEDICATED\n - HOST\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. 
When unspecified, it defaults\n to `m5.large`.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The initial labels assigned to nodes of this\n node pool. An object containing a list of \"key\": value pairs. Example\n { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.'\n x-kubernetes-immutable: true\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: NodePoolConfigProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: NodePoolConfigRootVolume\n description: Optional. Template for the root volume provisioned for\n node pool nodes. Volumes will be provisioned in the availability zone\n assigned to the node pool subnet. When unspecified, it defaults to\n 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. 
The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: NodePoolConfigRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to nodes in this pool. The manager will automatically create security\n groups with minimum rules needed for a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: NodePoolConfigSshConfig\n description: Optional. The SSH configuration.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. Key/value metadata to assign to each underlying\n AWS resource. Specify at most 50 pairs containing alphanumerics, spaces,\n and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters.\n Values can be up to 255 Unicode characters.\n x-kubernetes-immutable: true\n taints:\n type: array\n x-dcl-go-name: Taints\n description: Optional. 
The initial taints assigned to nodes of this\n node pool.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NodePoolConfigTaints\n required:\n - key\n - value\n - effect\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n x-dcl-go-type: NodePoolConfigTaintsEffectEnum\n description: 'The taint effect. Possible values: EFFECT_UNSPECIFIED,\n NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE'\n x-kubernetes-immutable: true\n enum:\n - EFFECT_UNSPECIFIED\n - NO_SCHEDULE\n - PREFER_NO_SCHEDULE\n - NO_EXECUTE\n key:\n type: string\n x-dcl-go-name: Key\n description: Key for the taint.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: Value for the taint.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this node pool was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. 
May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n maxPodsConstraint:\n type: object\n x-dcl-go-name: MaxPodsConstraint\n x-dcl-go-type: NodePoolMaxPodsConstraint\n description: The constraint on the maximum number of pods that can be run\n simultaneously on a node in the node pool.\n x-kubernetes-immutable: true\n required:\n - maxPodsPerNode\n properties:\n maxPodsPerNode:\n type: integer\n format: int64\n x-dcl-go-name: MaxPodsPerNode\n description: The maximum number of pods to schedule on a single node.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. If set, there are currently changes in flight\n to the node pool.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NodePoolStateEnum\n readOnly: true\n description: 'Output only. The lifecycle state of the node pool. Possible\n values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING,\n ERROR, DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n subnetId:\n type: string\n x-dcl-go-name: SubnetId\n description: The subnet where the node pool node run.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. 
A globally unique identifier for the node pool.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this node pool was last updated.\n x-kubernetes-immutable: true\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`).\n You can list all supported versions on a given Google Cloud region by\n calling GetAwsServerConfig.\n") -// 16411 bytes -// MD5: 04af25c2316db8e1bd3313613ea6f636 +// 16410 bytes +// MD5: ca46dd31a1d006f72b4ba903422b0e60 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_schema.go index bc88e5f238..874440d870 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta/node_pool_schema.go @@ -259,7 +259,7 @@ func DCLNodePoolSchema() *dcl.Schema { Type: "string", }, GoName: "Labels", - Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. 
Example { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", Immutable: true, }, "proxyConfig": &dcl.Property{ diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/azure_client.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/azure_client.go index a8f1208ebc..eed0b91bd9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/azure_client.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/azure_client.go @@ -151,10 +151,9 @@ func (c *Client) GetClient(ctx context.Context, r *AzureClient) (*AzureClient, e if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/cluster.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/cluster.go index 6a83222fc1..997023f49c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/cluster.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/cluster.go @@ -925,10 +925,9 @@ func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project 
= r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/node_pool.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/node_pool.go index 7074c4feb6..5bfade300b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/node_pool.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta/node_pool.go @@ -496,11 +496,10 @@ func (c *Client) GetNodePool(ctx context.Context, r *NodePool) (*NodePool, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Cluster = nr.Cluster - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Cluster = r.Cluster + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/datafusion/beta/instance.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/datafusion/beta/instance.go index 6e8cd8a8e6..19e8ddd8aa 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/datafusion/beta/instance.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/datafusion/beta/instance.go @@ -342,10 +342,9 @@ func (c *Client) GetInstance(ctx context.Context, r *Instance) (*Instance, error if err != nil { return 
nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.go index c0c2722db0..05307233d3 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.go @@ -42,7 +42,7 @@ type Asset struct { Project *string `json:"project"` Location *string `json:"location"` Lake *string `json:"lake"` - Zone *string `json:"zone"` + DataplexZone *string `json:"dataplexZone"` } func (r *Asset) String() string { @@ -91,7 +91,7 @@ func (v AssetResourceSpecTypeEnum) Validate() error { // Empty enum is okay. 
return nil } - for _, s := range []string{"TYPE_UNSPECIFIED", "STORAGE_BUCKET", "BIGQUERY_DATASET"} { + for _, s := range []string{"STORAGE_BUCKET", "BIGQUERY_DATASET"} { if string(v) == s { return nil } @@ -650,9 +650,9 @@ func (r *Asset) ID() (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "location": dcl.ValueOrEmptyString(nr.Location), "lake": dcl.ValueOrEmptyString(nr.Lake), - "zone": dcl.ValueOrEmptyString(nr.Zone), + "dataplex_zone": dcl.ValueOrEmptyString(nr.DataplexZone), } - return dcl.Nprintf("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}}", params), nil + return dcl.Nprintf("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}", params), nil } const AssetMaxPage = -1 @@ -687,25 +687,25 @@ func (l *AssetList) Next(ctx context.Context, c *Client) error { return err } -func (c *Client) ListAsset(ctx context.Context, project, location, zone, lake string) (*AssetList, error) { +func (c *Client) ListAsset(ctx context.Context, project, location, dataplexZone, lake string) (*AssetList, error) { ctx = dcl.ContextWithRequestID(ctx) ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) defer cancel() - return c.ListAssetWithMaxResults(ctx, project, location, zone, lake, AssetMaxPage) + return c.ListAssetWithMaxResults(ctx, project, location, dataplexZone, lake, AssetMaxPage) } -func (c *Client) ListAssetWithMaxResults(ctx context.Context, project, location, zone, lake string, pageSize int32) (*AssetList, error) { +func (c *Client) ListAssetWithMaxResults(ctx context.Context, project, location, dataplexZone, lake string, pageSize int32) (*AssetList, error) { ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) defer cancel() // Create a resource object so that we can use proper url normalization methods. 
r := &Asset{ - Project: &project, - Location: &location, - Zone: &zone, - Lake: &lake, + Project: &project, + Location: &location, + DataplexZone: &dataplexZone, + Lake: &lake, } items, token, err := c.listAsset(ctx, r, "", pageSize) if err != nil { @@ -743,12 +743,11 @@ func (c *Client) GetAsset(ctx context.Context, r *Asset) (*Asset, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Zone = nr.Zone - result.Lake = nr.Lake - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.DataplexZone = r.DataplexZone + result.Lake = r.Lake + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) @@ -778,8 +777,8 @@ func (c *Client) DeleteAsset(ctx context.Context, r *Asset) error { } // DeleteAllAsset deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllAsset(ctx context.Context, project, location, zone, lake string, filter func(*Asset) bool) error { - listObj, err := c.ListAsset(ctx, project, location, zone, lake) +func (c *Client) DeleteAllAsset(ctx context.Context, project, location, dataplexZone, lake string, filter func(*Asset) bool) error { + listObj, err := c.ListAsset(ctx, project, location, dataplexZone, lake) if err != nil { return err } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.yaml index 6e2ca61712..18654bb7be 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset.yaml @@ -46,7 +46,7 @@ paths: required: true schema: type: string - - name: zone + - name: dataplexzone required: true schema: type: string @@ -65,7 +65,7 @@ paths: required: true schema: type: string - - name: zone + - name: dataplexzone required: true schema: type: string @@ -77,9 +77,7 @@ components: schemas: Asset: title: Asset - x-dcl-id: projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}} - x-dcl-locations: - - zone + x-dcl-id: projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}} x-dcl-parent-container: project x-dcl-has-create: true x-dcl-has-iam: false @@ -94,7 +92,7 @@ components: - project - location - lake - - zone + - dataplexZone properties: createTime: type: string @@ -103,6 +101,11 @@ components: readOnly: true description: Output only. The time when the asset was created. 
x-kubernetes-immutable: true + dataplexZone: + type: string + x-dcl-go-name: DataplexZone + description: The zone for the resource + x-kubernetes-immutable: true description: type: string x-dcl-go-name: Description @@ -114,6 +117,8 @@ components: description: Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone. + required: + - enabled properties: csvOptions: type: object @@ -325,10 +330,9 @@ components: x-dcl-go-name: Type x-dcl-go-type: AssetResourceSpecTypeEnum description: 'Required. Immutable. Type of resource. Possible values: - TYPE_UNSPECIFIED, STORAGE_BUCKET, BIGQUERY_DATASET' + STORAGE_BUCKET, BIGQUERY_DATASET' x-kubernetes-immutable: true enum: - - TYPE_UNSPECIFIED - STORAGE_BUCKET - BIGQUERY_DATASET resourceStatus: @@ -415,8 +419,3 @@ components: readOnly: true description: Output only. The time when the asset was last updated. x-kubernetes-immutable: true - zone: - type: string - x-dcl-go-name: Zone - description: The zone for the resource - x-kubernetes-immutable: true diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_beta_yaml_embed.go index e827b318de..d69c40141a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/dataplex/beta/asset.yaml -var YAML_asset = []byte("info:\n title: Dataplex/Asset\n description: The Dataplex Asset resource\n x-dcl-struct-name: Asset\n x-dcl-has-iam: false\npaths:\n get:\n 
description: The function used to get information about a Asset\n parameters:\n - name: Asset\n required: true\n description: A full instance of a Asset\n apply:\n description: The function used to apply information about a Asset\n parameters:\n - name: Asset\n required: true\n description: A full instance of a Asset\n delete:\n description: The function used to delete a Asset\n parameters:\n - name: Asset\n required: true\n description: A full instance of a Asset\n deleteAll:\n description: The function used to delete all Asset\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: zone\n required: true\n schema:\n type: string\n - name: lake\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Asset\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: zone\n required: true\n schema:\n type: string\n - name: lake\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Asset:\n title: Asset\n x-dcl-id: projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}}\n x-dcl-locations:\n - zone\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - resourceSpec\n - discoverySpec\n - project\n - location\n - lake\n - zone\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time when the asset was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the asset.\n discoverySpec:\n type: object\n x-dcl-go-name: DiscoverySpec\n x-dcl-go-type: AssetDiscoverySpec\n description: Required. 
Specification of the discovery feature applied to\n data referenced by this asset. When this spec is left unset, the asset\n will use the spec set on the parent zone.\n properties:\n csvOptions:\n type: object\n x-dcl-go-name: CsvOptions\n x-dcl-go-type: AssetDiscoverySpecCsvOptions\n description: Optional. Configuration for CSV data.\n properties:\n delimiter:\n type: string\n x-dcl-go-name: Delimiter\n description: Optional. The delimiter being used to separate values.\n This defaults to ','.\n disableTypeInference:\n type: boolean\n x-dcl-go-name: DisableTypeInference\n description: Optional. Whether to disable the inference of data\n type for CSV data. If true, all columns will be registered as\n strings.\n encoding:\n type: string\n x-dcl-go-name: Encoding\n description: Optional. The character encoding of the data. The default\n is UTF-8.\n headerRows:\n type: integer\n format: int64\n x-dcl-go-name: HeaderRows\n description: Optional. The number of rows to interpret as header\n rows that should be skipped when reading data rows.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Required. Whether discovery is enabled.\n excludePatterns:\n type: array\n x-dcl-go-name: ExcludePatterns\n description: Optional. The list of patterns to apply for selecting data\n to exclude during discovery. For Cloud Storage bucket assets, these\n are interpreted as glob patterns used to match object names. For BigQuery\n dataset assets, these are interpreted as patterns to match table names.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n includePatterns:\n type: array\n x-dcl-go-name: IncludePatterns\n description: Optional. The list of patterns to apply for selecting data\n to include during discovery if only a subset of the data should considered.\n For Cloud Storage bucket assets, these are interpreted as glob patterns\n used to match object names. 
For BigQuery dataset assets, these are\n interpreted as patterns to match table names.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jsonOptions:\n type: object\n x-dcl-go-name: JsonOptions\n x-dcl-go-type: AssetDiscoverySpecJsonOptions\n description: Optional. Configuration for Json data.\n properties:\n disableTypeInference:\n type: boolean\n x-dcl-go-name: DisableTypeInference\n description: Optional. Whether to disable the inference of data\n type for Json data. If true, all columns will be registered as\n their primitive types (strings, number or boolean).\n encoding:\n type: string\n x-dcl-go-name: Encoding\n description: Optional. The character encoding of the data. The default\n is UTF-8.\n schedule:\n type: string\n x-dcl-go-name: Schedule\n description: 'Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron)\n for running discovery periodically. Successive discovery runs must\n be scheduled at least 60 minutes apart. The default value is to run\n discovery every 60 minutes. To explicitly set a timezone to the cron\n tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or\n TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string\n from IANA time zone database. For example, \"CRON_TZ=America/New_York\n 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".'\n discoveryStatus:\n type: object\n x-dcl-go-name: DiscoveryStatus\n x-dcl-go-type: AssetDiscoveryStatus\n readOnly: true\n description: Output only. 
Status of the discovery feature applied to data\n referenced by this asset.\n properties:\n lastRunDuration:\n type: string\n x-dcl-go-name: LastRunDuration\n description: The duration of the last discovery run.\n lastRunTime:\n type: string\n format: date-time\n x-dcl-go-name: LastRunTime\n description: The start time of the last discovery run.\n message:\n type: string\n x-dcl-go-name: Message\n description: Additional information about the current state.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetDiscoveryStatusStateEnum\n description: 'The current status of the discovery feature. Possible\n values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED'\n enum:\n - STATE_UNSPECIFIED\n - SCHEDULED\n - IN_PROGRESS\n - PAUSED\n - DISABLED\n stats:\n type: object\n x-dcl-go-name: Stats\n x-dcl-go-type: AssetDiscoveryStatusStats\n description: Data Stats of the asset reported by discovery.\n properties:\n dataItems:\n type: integer\n format: int64\n x-dcl-go-name: DataItems\n description: The count of data items within the referenced resource.\n dataSize:\n type: integer\n format: int64\n x-dcl-go-name: DataSize\n description: The number of stored data bytes within the referenced\n resource.\n filesets:\n type: integer\n format: int64\n x-dcl-go-name: Filesets\n description: The count of fileset entities within the referenced\n resource.\n tables:\n type: integer\n format: int64\n x-dcl-go-name: Tables\n description: The count of table entities within the referenced resource.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last update time of the status.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Optional. User friendly display name.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
User defined labels for the asset.\n lake:\n type: string\n x-dcl-go-name: Lake\n description: The lake for the resource\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the asset.\n x-dcl-references:\n - resource: Dataplex/Asset\n field: selfLink\n parent: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n resourceSpec:\n type: object\n x-dcl-go-name: ResourceSpec\n x-dcl-go-type: AssetResourceSpec\n description: Required. Immutable. Specification of the resource that is\n referenced by this asset.\n x-kubernetes-immutable: true\n required:\n - type\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Immutable. Relative name of the cloud resource that contains\n the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}`\n `projects/{project_number}/datasets/{dataset_id}`'\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: AssetResourceSpecTypeEnum\n description: 'Required. Immutable. Type of resource. Possible values:\n TYPE_UNSPECIFIED, STORAGE_BUCKET, BIGQUERY_DATASET'\n x-kubernetes-immutable: true\n enum:\n - TYPE_UNSPECIFIED\n - STORAGE_BUCKET\n - BIGQUERY_DATASET\n resourceStatus:\n type: object\n x-dcl-go-name: ResourceStatus\n x-dcl-go-type: AssetResourceStatus\n readOnly: true\n description: Output only. 
Status of the resource referenced by this asset.\n properties:\n message:\n type: string\n x-dcl-go-name: Message\n description: Additional information about the current state.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetResourceStatusStateEnum\n description: 'The current state of the managed resource. Possible values:\n STATE_UNSPECIFIED, READY, ERROR'\n enum:\n - STATE_UNSPECIFIED\n - READY\n - ERROR\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last update time of the status.\n securityStatus:\n type: object\n x-dcl-go-name: SecurityStatus\n x-dcl-go-type: AssetSecurityStatus\n readOnly: true\n description: Output only. Status of the security policy applied to resource\n referenced by this asset.\n properties:\n message:\n type: string\n x-dcl-go-name: Message\n description: Additional information about the current state.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetSecurityStatusStateEnum\n description: 'The current state of the security policy applied to the\n attached resource. Possible values: STATE_UNSPECIFIED, READY, APPLYING,\n ERROR'\n enum:\n - STATE_UNSPECIFIED\n - READY\n - APPLYING\n - ERROR\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last update time of the status.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetStateEnum\n readOnly: true\n description: 'Output only. Current state of the asset. Possible values:\n STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - ACTIVE\n - CREATING\n - DELETING\n - ACTION_REQUIRED\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. 
System generated globally unique ID for the asset.\n This ID will be different if the asset is deleted and re-created with\n the same name.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time when the asset was last updated.\n x-kubernetes-immutable: true\n zone:\n type: string\n x-dcl-go-name: Zone\n description: The zone for the resource\n x-kubernetes-immutable: true\n") +var YAML_asset = []byte("info:\n title: Dataplex/Asset\n description: The Dataplex Asset resource\n x-dcl-struct-name: Asset\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Asset\n parameters:\n - name: Asset\n required: true\n description: A full instance of a Asset\n apply:\n description: The function used to apply information about a Asset\n parameters:\n - name: Asset\n required: true\n description: A full instance of a Asset\n delete:\n description: The function used to delete a Asset\n parameters:\n - name: Asset\n required: true\n description: A full instance of a Asset\n deleteAll:\n description: The function used to delete all Asset\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: dataplexzone\n required: true\n schema:\n type: string\n - name: lake\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Asset\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: dataplexzone\n required: true\n schema:\n type: string\n - name: lake\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Asset:\n title: Asset\n x-dcl-id: projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n 
x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - resourceSpec\n - discoverySpec\n - project\n - location\n - lake\n - dataplexZone\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time when the asset was created.\n x-kubernetes-immutable: true\n dataplexZone:\n type: string\n x-dcl-go-name: DataplexZone\n description: The zone for the resource\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the asset.\n discoverySpec:\n type: object\n x-dcl-go-name: DiscoverySpec\n x-dcl-go-type: AssetDiscoverySpec\n description: Required. Specification of the discovery feature applied to\n data referenced by this asset. When this spec is left unset, the asset\n will use the spec set on the parent zone.\n required:\n - enabled\n properties:\n csvOptions:\n type: object\n x-dcl-go-name: CsvOptions\n x-dcl-go-type: AssetDiscoverySpecCsvOptions\n description: Optional. Configuration for CSV data.\n properties:\n delimiter:\n type: string\n x-dcl-go-name: Delimiter\n description: Optional. The delimiter being used to separate values.\n This defaults to ','.\n disableTypeInference:\n type: boolean\n x-dcl-go-name: DisableTypeInference\n description: Optional. Whether to disable the inference of data\n type for CSV data. If true, all columns will be registered as\n strings.\n encoding:\n type: string\n x-dcl-go-name: Encoding\n description: Optional. The character encoding of the data. The default\n is UTF-8.\n headerRows:\n type: integer\n format: int64\n x-dcl-go-name: HeaderRows\n description: Optional. The number of rows to interpret as header\n rows that should be skipped when reading data rows.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Required. 
Whether discovery is enabled.\n excludePatterns:\n type: array\n x-dcl-go-name: ExcludePatterns\n description: Optional. The list of patterns to apply for selecting data\n to exclude during discovery. For Cloud Storage bucket assets, these\n are interpreted as glob patterns used to match object names. For BigQuery\n dataset assets, these are interpreted as patterns to match table names.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n includePatterns:\n type: array\n x-dcl-go-name: IncludePatterns\n description: Optional. The list of patterns to apply for selecting data\n to include during discovery if only a subset of the data should considered.\n For Cloud Storage bucket assets, these are interpreted as glob patterns\n used to match object names. For BigQuery dataset assets, these are\n interpreted as patterns to match table names.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jsonOptions:\n type: object\n x-dcl-go-name: JsonOptions\n x-dcl-go-type: AssetDiscoverySpecJsonOptions\n description: Optional. Configuration for Json data.\n properties:\n disableTypeInference:\n type: boolean\n x-dcl-go-name: DisableTypeInference\n description: Optional. Whether to disable the inference of data\n type for Json data. If true, all columns will be registered as\n their primitive types (strings, number or boolean).\n encoding:\n type: string\n x-dcl-go-name: Encoding\n description: Optional. The character encoding of the data. The default\n is UTF-8.\n schedule:\n type: string\n x-dcl-go-name: Schedule\n description: 'Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron)\n for running discovery periodically. Successive discovery runs must\n be scheduled at least 60 minutes apart. The default value is to run\n discovery every 60 minutes. 
To explicitly set a timezone to the cron\n tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or\n TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string\n from IANA time zone database. For example, \"CRON_TZ=America/New_York\n 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".'\n discoveryStatus:\n type: object\n x-dcl-go-name: DiscoveryStatus\n x-dcl-go-type: AssetDiscoveryStatus\n readOnly: true\n description: Output only. Status of the discovery feature applied to data\n referenced by this asset.\n properties:\n lastRunDuration:\n type: string\n x-dcl-go-name: LastRunDuration\n description: The duration of the last discovery run.\n lastRunTime:\n type: string\n format: date-time\n x-dcl-go-name: LastRunTime\n description: The start time of the last discovery run.\n message:\n type: string\n x-dcl-go-name: Message\n description: Additional information about the current state.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetDiscoveryStatusStateEnum\n description: 'The current status of the discovery feature. 
Possible\n values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED'\n enum:\n - STATE_UNSPECIFIED\n - SCHEDULED\n - IN_PROGRESS\n - PAUSED\n - DISABLED\n stats:\n type: object\n x-dcl-go-name: Stats\n x-dcl-go-type: AssetDiscoveryStatusStats\n description: Data Stats of the asset reported by discovery.\n properties:\n dataItems:\n type: integer\n format: int64\n x-dcl-go-name: DataItems\n description: The count of data items within the referenced resource.\n dataSize:\n type: integer\n format: int64\n x-dcl-go-name: DataSize\n description: The number of stored data bytes within the referenced\n resource.\n filesets:\n type: integer\n format: int64\n x-dcl-go-name: Filesets\n description: The count of fileset entities within the referenced\n resource.\n tables:\n type: integer\n format: int64\n x-dcl-go-name: Tables\n description: The count of table entities within the referenced resource.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last update time of the status.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Optional. User friendly display name.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
User defined labels for the asset.\n lake:\n type: string\n x-dcl-go-name: Lake\n description: The lake for the resource\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the asset.\n x-dcl-references:\n - resource: Dataplex/Asset\n field: selfLink\n parent: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n resourceSpec:\n type: object\n x-dcl-go-name: ResourceSpec\n x-dcl-go-type: AssetResourceSpec\n description: Required. Immutable. Specification of the resource that is\n referenced by this asset.\n x-kubernetes-immutable: true\n required:\n - type\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Immutable. Relative name of the cloud resource that contains\n the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}`\n `projects/{project_number}/datasets/{dataset_id}`'\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: AssetResourceSpecTypeEnum\n description: 'Required. Immutable. Type of resource. Possible values:\n STORAGE_BUCKET, BIGQUERY_DATASET'\n x-kubernetes-immutable: true\n enum:\n - STORAGE_BUCKET\n - BIGQUERY_DATASET\n resourceStatus:\n type: object\n x-dcl-go-name: ResourceStatus\n x-dcl-go-type: AssetResourceStatus\n readOnly: true\n description: Output only. Status of the resource referenced by this asset.\n properties:\n message:\n type: string\n x-dcl-go-name: Message\n description: Additional information about the current state.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetResourceStatusStateEnum\n description: 'The current state of the managed resource. 
Possible values:\n STATE_UNSPECIFIED, READY, ERROR'\n enum:\n - STATE_UNSPECIFIED\n - READY\n - ERROR\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last update time of the status.\n securityStatus:\n type: object\n x-dcl-go-name: SecurityStatus\n x-dcl-go-type: AssetSecurityStatus\n readOnly: true\n description: Output only. Status of the security policy applied to resource\n referenced by this asset.\n properties:\n message:\n type: string\n x-dcl-go-name: Message\n description: Additional information about the current state.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetSecurityStatusStateEnum\n description: 'The current state of the security policy applied to the\n attached resource. Possible values: STATE_UNSPECIFIED, READY, APPLYING,\n ERROR'\n enum:\n - STATE_UNSPECIFIED\n - READY\n - APPLYING\n - ERROR\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last update time of the status.\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: AssetStateEnum\n readOnly: true\n description: 'Output only. Current state of the asset. Possible values:\n STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - ACTIVE\n - CREATING\n - DELETING\n - ACTION_REQUIRED\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. System generated globally unique ID for the asset.\n This ID will be different if the asset is deleted and re-created with\n the same name.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The time when the asset was last updated.\n x-kubernetes-immutable: true\n") -// 15319 bytes -// MD5: 6c49002a373b3b495066dadbb954e64a +// 15321 bytes +// MD5: 229f226d4f56fbfbca391a6bf4942b9a diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_internal.go index 2df8ea54a9..1fe209d535 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_internal.go @@ -45,7 +45,7 @@ func (r *Asset) validate() error { if err := dcl.RequiredParameter(r.Lake, "Lake"); err != nil { return err } - if err := dcl.RequiredParameter(r.Zone, "Zone"); err != nil { + if err := dcl.RequiredParameter(r.DataplexZone, "DataplexZone"); err != nil { return err } if !dcl.IsEmptyValueIndirect(r.ResourceSpec) { @@ -88,6 +88,9 @@ func (r *AssetSecurityStatus) validate() error { return nil } func (r *AssetDiscoverySpec) validate() error { + if err := dcl.Required(r, "enabled"); err != nil { + return err + } if !dcl.IsEmptyValueIndirect(r.CsvOptions) { if err := r.CsvOptions.validate(); err != nil { return err @@ -125,50 +128,50 @@ func (r *Asset) basePath() string { func (r *Asset) getURL(userBasePath string) (string, error) { nr := r.urlNormalized() params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "zone": dcl.ValueOrEmptyString(nr.Zone), - "lake": dcl.ValueOrEmptyString(nr.Lake), - "name": dcl.ValueOrEmptyString(nr.Name), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": 
dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Asset) listURL(userBasePath string) (string, error) { nr := r.urlNormalized() params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "zone": dcl.ValueOrEmptyString(nr.Zone), - "lake": dcl.ValueOrEmptyString(nr.Lake), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), } - return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets", nr.basePath(), userBasePath, params), nil } func (r *Asset) createURL(userBasePath string) (string, error) { nr := r.urlNormalized() params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "zone": dcl.ValueOrEmptyString(nr.Zone), - "lake": dcl.ValueOrEmptyString(nr.Lake), - "name": dcl.ValueOrEmptyString(nr.Name), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets?assetId={{name}}", nr.basePath(), userBasePath, params), nil + return 
dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets?assetId={{name}}", nr.basePath(), userBasePath, params), nil } func (r *Asset) deleteURL(userBasePath string) (string, error) { nr := r.urlNormalized() params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "zone": dcl.ValueOrEmptyString(nr.Zone), - "lake": dcl.ValueOrEmptyString(nr.Lake), - "name": dcl.ValueOrEmptyString(nr.Name), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Asset) SetPolicyURL(userBasePath string) string { @@ -205,7 +208,7 @@ func newUpdateAssetUpdateAssetRequest(ctx context.Context, f *Asset, c *Client) res := f _ = res - if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.Zone), dcl.SelfLinkToName(f.Name)); err != nil { + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.DataplexZone), dcl.SelfLinkToName(f.Name)); err != nil { return nil, fmt.Errorf("error expanding Name into name: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { req["name"] = v @@ -239,7 +242,7 @@ func newUpdateAssetUpdateAssetRequest(ctx context.Context, f *Asset, c *Client) } else 
if !dcl.IsEmptyValueIndirect(v) { req["discoveryStatus"] = v } - req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", *f.Project, *f.Location, *f.Lake, *f.Zone, *f.Name) + req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", *f.Project, *f.Location, *f.Lake, *f.DataplexZone, *f.Name) return req, nil } @@ -358,7 +361,7 @@ func (c *Client) listAsset(ctx context.Context, r *Asset, pageToken string, page } res.Project = r.Project res.Location = r.Location - res.Zone = r.Zone + res.DataplexZone = r.DataplexZone res.Lake = r.Lake l = append(l, res) } @@ -618,10 +621,10 @@ func canonicalizeAssetDesiredState(rawDesired, rawInitial *Asset, opts ...dcl.Ap } else { canonicalDesired.Lake = rawDesired.Lake } - if dcl.NameToSelfLink(rawDesired.Zone, rawInitial.Zone) { - canonicalDesired.Zone = rawInitial.Zone + if dcl.NameToSelfLink(rawDesired.DataplexZone, rawInitial.DataplexZone) { + canonicalDesired.DataplexZone = rawInitial.DataplexZone } else { - canonicalDesired.Zone = rawDesired.Zone + canonicalDesired.DataplexZone = rawDesired.DataplexZone } return canonicalDesired, nil @@ -717,7 +720,7 @@ func canonicalizeAssetNewState(c *Client, rawNew, rawDesired *Asset) (*Asset, er rawNew.Lake = rawDesired.Lake - rawNew.Zone = rawDesired.Zone + rawNew.DataplexZone = rawDesired.DataplexZone return rawNew, nil } @@ -1903,7 +1906,7 @@ func diffAsset(c *Client, desired, actual *Asset, opts ...dcl.ApplyOption) ([]*d newDiffs = append(newDiffs, ds...) 
} - if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil { + if ds, err := dcl.Diff(desired.DataplexZone, actual.DataplexZone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil { if err != nil { return nil, err } @@ -2310,7 +2313,7 @@ func (r *Asset) urlNormalized() *Asset { normalized.Project = dcl.SelfLinkToName(r.Project) normalized.Location = dcl.SelfLinkToName(r.Location) normalized.Lake = dcl.SelfLinkToName(r.Lake) - normalized.Zone = dcl.SelfLinkToName(r.Zone) + normalized.DataplexZone = dcl.SelfLinkToName(r.DataplexZone) return &normalized } @@ -2318,13 +2321,13 @@ func (r *Asset) updateURL(userBasePath, updateName string) (string, error) { nr := r.urlNormalized() if updateName == "UpdateAsset" { fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "zone": dcl.ValueOrEmptyString(nr.Zone), - "lake": dcl.ValueOrEmptyString(nr.Lake), - "name": dcl.ValueOrEmptyString(nr.Name), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}}", nr.basePath(), userBasePath, fields), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, fields), nil } @@ -2366,7 +2369,7 @@ func expandAsset(c *Client, f *Asset) (map[string]interface{}, error) { m := make(map[string]interface{}) res := f _ = res - if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), 
dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.Zone), dcl.SelfLinkToName(f.Name)); err != nil { + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.DataplexZone), dcl.SelfLinkToName(f.Name)); err != nil { return nil, fmt.Errorf("error expanding Name into name: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { m["name"] = v @@ -2406,7 +2409,7 @@ func expandAsset(c *Client, f *Asset) (map[string]interface{}, error) { m["lake"] = v } if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Zone into zone: %w", err) + return nil, fmt.Errorf("error expanding DataplexZone into zone: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { m["zone"] = v } @@ -2442,7 +2445,7 @@ func flattenAsset(c *Client, i interface{}, res *Asset) *Asset { resultRes.Project = dcl.FlattenString(m["project"]) resultRes.Location = dcl.FlattenString(m["location"]) resultRes.Lake = dcl.FlattenString(m["lake"]) - resultRes.Zone = dcl.FlattenString(m["zone"]) + resultRes.DataplexZone = dcl.FlattenString(m["zone"]) return resultRes } @@ -3738,12 +3741,12 @@ func (r *Asset) matcher(c *Client) func([]byte) bool { } else if *nr.Location != *ncr.Location { return false } - if nr.Zone == nil && ncr.Zone == nil { - c.Config.Logger.Info("Both Zone fields null - considering equal.") - } else if nr.Zone == nil || ncr.Zone == nil { - c.Config.Logger.Info("Only one Zone field is null - considering unequal.") + if nr.DataplexZone == nil && ncr.DataplexZone == nil { + c.Config.Logger.Info("Both DataplexZone fields null - considering equal.") + } else if nr.DataplexZone == nil || ncr.DataplexZone == nil { + c.Config.Logger.Info("Only one DataplexZone field is null - considering unequal.") return false - } else if *nr.Zone != *ncr.Zone { + } else if *nr.DataplexZone != *ncr.DataplexZone { return false } if nr.Lake == nil && ncr.Lake 
== nil { diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_schema.go index ba2d996696..1d04dbf776 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/asset_schema.go @@ -73,7 +73,7 @@ func DCLAssetSchema() *dcl.Schema { }, }, dcl.PathParameters{ - Name: "zone", + Name: "dataplexzone", Required: true, Schema: &dcl.PathParametersSchema{ Type: "string", @@ -106,7 +106,7 @@ func DCLAssetSchema() *dcl.Schema { }, }, dcl.PathParameters{ - Name: "zone", + Name: "dataplexzone", Required: true, Schema: &dcl.PathParametersSchema{ Type: "string", @@ -125,11 +125,8 @@ func DCLAssetSchema() *dcl.Schema { Components: &dcl.Components{ Schemas: map[string]*dcl.Component{ "Asset": &dcl.Component{ - Title: "Asset", - ID: "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{zone}}/assets/{{name}}", - Locations: []string{ - "zone", - }, + Title: "Asset", + ID: "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}", ParentContainer: "project", HasCreate: true, SchemaProperty: dcl.Property{ @@ -141,7 +138,7 @@ func DCLAssetSchema() *dcl.Schema { "project", "location", "lake", - "zone", + "dataplexZone", }, Properties: map[string]*dcl.Property{ "createTime": &dcl.Property{ @@ -152,6 +149,12 @@ func DCLAssetSchema() *dcl.Schema { Description: "Output only. 
The time when the asset was created.", Immutable: true, }, + "dataplexZone": &dcl.Property{ + Type: "string", + GoName: "DataplexZone", + Description: "The zone for the resource", + Immutable: true, + }, "description": &dcl.Property{ Type: "string", GoName: "Description", @@ -162,6 +165,9 @@ func DCLAssetSchema() *dcl.Schema { GoName: "DiscoverySpec", GoType: "AssetDiscoverySpec", Description: "Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone.", + Required: []string{ + "enabled", + }, Properties: map[string]*dcl.Property{ "csvOptions": &dcl.Property{ Type: "object", @@ -390,10 +396,9 @@ func DCLAssetSchema() *dcl.Schema { Type: "string", GoName: "Type", GoType: "AssetResourceSpecTypeEnum", - Description: "Required. Immutable. Type of resource. Possible values: TYPE_UNSPECIFIED, STORAGE_BUCKET, BIGQUERY_DATASET", + Description: "Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", Immutable: true, Enum: []string{ - "TYPE_UNSPECIFIED", "STORAGE_BUCKET", "BIGQUERY_DATASET", }, @@ -493,12 +498,6 @@ func DCLAssetSchema() *dcl.Schema { Description: "Output only. 
The time when the asset was last updated.", Immutable: true, }, - "zone": &dcl.Property{ - Type: "string", - GoName: "Zone", - Description: "The zone for the resource", - Immutable: true, - }, }, }, }, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/lake.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/lake.go index 98f5c605fe..212df27809 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/lake.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/lake.go @@ -373,10 +373,9 @@ func (c *Client) GetLake(ctx context.Context, r *Lake) (*Lake, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/zone.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/zone.go index f203742d3c..5fe5b63d9a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/zone.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta/zone.go @@ -513,11 +513,10 @@ func (c *Client) GetZone(ctx context.Context, r *Zone) (*Zone, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Lake = nr.Lake - result.Name = nr.Name + result.Project = 
r.Project + result.Location = r.Location + result.Lake = r.Lake + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/autoscaling_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/autoscaling_policy.go index 878571601c..ed50c096dc 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/autoscaling_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/autoscaling_policy.go @@ -360,10 +360,9 @@ func (c *Client) GetAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy) if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/autoscaling_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/autoscaling_policy.go index 5fe22a5cab..e90d53ca0b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/autoscaling_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/autoscaling_policy.go @@ -360,10 +360,9 @@ func (c *Client) GetAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy) if err != nil { return nil, err } - 
nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/cluster.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/cluster.go index 885e56c1ec..ffb10c986d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/cluster.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/cluster.go @@ -2088,10 +2088,9 @@ func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.go index 9e49e3c339..36c7fa3fe4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.go @@ -3316,10 +3316,9 @@ func (c *Client) 
GetWorkflowTemplate(ctx context.Context, r *WorkflowTemplate) ( if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.yaml index 6c8015f323..817817fd73 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template.yaml @@ -262,7 +262,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true @@ -367,7 +367,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true @@ -473,7 +473,7 @@ components: need to end a query expression with a semicolon. 
Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true @@ -851,7 +851,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_beta_yaml_embed.go index 949a63e327..58e7dbb746 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/dataproc/beta/workflow_template.yaml -var YAML_workflow_template = []byte("info:\n title: Dataproc/WorkflowTemplate\n description: The Dataproc WorkflowTemplate resource\n x-dcl-struct-name: WorkflowTemplate\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n apply:\n description: The function used to apply 
information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n delete:\n description: The function used to delete a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n deleteAll:\n description: The function used to delete all WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n WorkflowTemplate:\n title: WorkflowTemplate\n x-dcl-id: projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - placement\n - jobs\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time template was created.\n x-kubernetes-immutable: true\n dagTimeout:\n type: string\n x-dcl-go-name: DagTimeout\n description: Optional. Timeout duration for the DAG of jobs, expressed in\n seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\").\n The timer begins when the first job is submitted. 
If the workflow is running\n at the end of the timeout period, any remaining jobs are cancelled, the\n workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),\n the cluster is deleted.\n x-kubernetes-immutable: true\n jobs:\n type: array\n x-dcl-go-name: Jobs\n description: Required. The Directed Acyclic Graph of Jobs to submit.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateJobs\n required:\n - stepId\n properties:\n hadoopJob:\n type: object\n x-dcl-go-name: HadoopJob\n x-dcl-go-type: WorkflowTemplateJobsHadoopJob\n description: Optional. Job is a Hadoop job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n in the working directory of Hadoop drivers and tasks. Supported\n file types: .jar, .tar, .tar.gz, .tgz, or .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `-libjars` or `-Dfoo=bar`, that\n can be set as job properties, since a collision may occur that\n causes an incorrect job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS (Hadoop Compatible Filesystem) URIs\n of files to be copied to the working directory of Hadoop drivers\n and distributed tasks. 
Useful for naively parallel tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. Jar file URIs to add to the CLASSPATHs\n of the Hadoop driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsHadoopJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n containing the class must be in the default CLASSPATH or specified\n in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: 'The HCFS URI of the jar file containing the main\n class. Examples: ''gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar''\n ''hdfs:/tmp/test-samples/custom-wordcount.jar'' ''file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Hadoop. Properties that conflict with values\n set by the Dataproc API may be overwritten. 
Can include properties\n set in /etc/hadoop/conf/*-site and classes in user code.\n x-kubernetes-immutable: true\n hiveJob:\n type: object\n x-dcl-go-name: HiveJob\n x-dcl-go-type: WorkflowTemplateJobsHiveJob\n description: Optional. Job is a Hive job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Hive server and Hadoop MapReduce (MR) tasks. Can contain\n Hive SerDes and UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names and values,\n used to configure Hive. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains Hive queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsHiveJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Hive command: `SET name=\"value\";`).'\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The labels to associate with this job. Label\n keys must be between 1 and 63 characters long, and must conform\n to the following regular expression: p{Ll}p{Lo}{0,62} Label values\n must be between 1 and 63 characters long, and must conform to the\n following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than\n 32 labels can be associated with a given job.'\n x-kubernetes-immutable: true\n pigJob:\n type: object\n x-dcl-go-name: PigJob\n x-dcl-go-type: WorkflowTemplateJobsPigJob\n description: Optional. Job is a Pig job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Pig Client and Hadoop MapReduce (MR) tasks. 
Can contain\n Pig UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPigJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Pig. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains the Pig\n queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPigJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Pig command: `name=[value]`).'\n x-kubernetes-immutable: true\n prerequisiteStepIds:\n type: array\n x-dcl-go-name: PrerequisiteStepIds\n description: Optional. The optional list of prerequisite job step_ids.\n If not specified, the job will start at the beginning of workflow.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n prestoJob:\n type: object\n x-dcl-go-name: PrestoJob\n x-dcl-go-type: WorkflowTemplateJobsPrestoJob\n description: Optional. Job is a Presto job.\n x-kubernetes-immutable: true\n properties:\n clientTags:\n type: array\n x-dcl-go-name: ClientTags\n description: Optional. Presto client tags to attach to this query\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobLoggingConfig\n description: Optional. 
The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n outputFormat:\n type: string\n x-dcl-go-name: OutputFormat\n description: Optional. The format in which query output will be\n displayed. See the Presto documentation for supported output\n formats\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values.\n Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html)\n Equivalent to using the --session flag in the Presto CLI\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n pysparkJob:\n type: object\n x-dcl-go-name: PysparkJob\n x-dcl-go-type: WorkflowTemplateJobsPysparkJob\n description: Optional. Job is a PySpark job.\n x-kubernetes-immutable: true\n required:\n - mainPythonFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. 
HCFS URIs of jar files to add to the CLASSPATHs\n of the Python driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPysparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainPythonFileUri:\n type: string\n x-dcl-go-name: MainPythonFileUri\n description: Required. The HCFS URI of the main Python file to\n use as the driver. Must be a .py file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure PySpark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n pythonFileUris:\n type: array\n x-dcl-go-name: PythonFileUris\n description: 'Optional. HCFS file URIs of Python files to pass\n to the PySpark framework. Supported file types: .py, .egg, and\n .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scheduling:\n type: object\n x-dcl-go-name: Scheduling\n x-dcl-go-type: WorkflowTemplateJobsScheduling\n description: Optional. 
Job scheduling configuration.\n x-kubernetes-immutable: true\n properties:\n maxFailuresPerHour:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresPerHour\n description: Optional. Maximum number of times per hour a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. A job may be reported as\n thrashing if driver exits with non-zero code 4 times within\n 10 minute window. Maximum value is 10.\n x-kubernetes-immutable: true\n maxFailuresTotal:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresTotal\n description: Optional. Maximum number of times in total a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. Maximum value is 240.\n x-kubernetes-immutable: true\n sparkJob:\n type: object\n x-dcl-go-name: SparkJob\n x-dcl-go-type: WorkflowTemplateJobsSparkJob\n description: Optional. Job is a Spark job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. 
Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATHs\n of the Spark driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n that contains the class must be in the default CLASSPATH or\n specified in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: The HCFS URI of the jar file that contains the main\n class.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkRJob:\n type: object\n x-dcl-go-name: SparkRJob\n x-dcl-go-type: WorkflowTemplateJobsSparkRJob\n description: Optional. 
Job is a SparkR job.\n x-kubernetes-immutable: true\n required:\n - mainRFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkRJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainRFileUri:\n type: string\n x-dcl-go-name: MainRFileUri\n description: Required. The HCFS URI of the main R file to use\n as the driver. 
Must be a .R file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure SparkR. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkSqlJob:\n type: object\n x-dcl-go-name: SparkSqlJob\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJob\n description: Optional. Job is a SparkSql job.\n x-kubernetes-immutable: true\n properties:\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to be added to the\n Spark CLASSPATH.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark SQL's SparkConf. 
Properties that conflict\n with values set by the Dataproc API may be overwritten.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Spark SQL command: SET `name=\"value\";`).'\n x-kubernetes-immutable: true\n stepId:\n type: string\n x-dcl-go-name: StepId\n description: Required. The step id. The id must be unique among all\n jobs within the template. The step id is used as prefix for job\n id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds\n field from other steps. The id must contain only letters (a-z, A-Z),\n numbers (0-9), underscores (_), and hyphens (-). Cannot begin or\n end with underscore or hyphen. Must consist of between 3 and 50\n characters.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
The labels to associate with this template. These\n labels will be propagated to all jobs and clusters created by the workflow\n instance. Label **keys** must contain 1 to 63 characters, and must conform\n to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values**\n may be empty, but, if present, must contain 1 to 63 characters, and must\n conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than\n 32 labels can be associated with a template.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The resource name of the workflow template, as\n described in https://cloud.google.com/apis/design/resource_names. * For\n `projects.regions.workflowTemplates`, the resource name of the template\n has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`\n * For `projects.locations.workflowTemplates`, the resource name of the\n template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`'\n x-kubernetes-immutable: true\n parameters:\n type: array\n x-dcl-go-name: Parameters\n description: Optional. Template parameters whose values are substituted\n into the template. Values for parameters must be provided when the template\n is instantiated.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateParameters\n required:\n - name\n - fields\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Brief description of the parameter. Must not\n exceed 1024 characters.\n x-kubernetes-immutable: true\n fields:\n type: array\n x-dcl-go-name: Fields\n description: 'Required. 
Paths to all fields that the parameter replaces.\n A field is allowed to appear in at most one parameter''s list of\n field paths. A field path is similar in syntax to a google.protobuf.FieldMask.\n For example, a field path that references the zone field of a workflow\n template''s cluster selector would be specified as `placement.clusterSelector.zone`.\n Also, field paths can reference fields using the following syntax:\n * Values in maps can be referenced by key: * labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * placement.managedCluster.labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * jobs[''step-id''].labels[''key''] * Jobs in the jobs list can\n be referenced by step-id: * jobs[''step-id''].hadoopJob.mainJarFileUri\n * jobs[''step-id''].hiveJob.queryFileUri * jobs[''step-id''].pySparkJob.mainPythonFileUri\n * jobs[''step-id''].hadoopJob.jarFileUris[0] * jobs[''step-id''].hadoopJob.archiveUris[0]\n * jobs[''step-id''].hadoopJob.fileUris[0] * jobs[''step-id''].pySparkJob.pythonFileUris[0]\n * Items in repeated fields can be referenced by a zero-based index:\n * jobs[''step-id''].sparkJob.args[0] * Other examples: * jobs[''step-id''].hadoopJob.properties[''key'']\n * jobs[''step-id''].hadoopJob.args[0] * jobs[''step-id''].hiveJob.scriptVariables[''key'']\n * jobs[''step-id''].hadoopJob.mainJarFileUri * placement.clusterSelector.zone\n It may not be possible to parameterize maps and repeated fields\n in their entirety since only individual map values and individual\n items in repeated fields can be referenced. For example, the following\n field paths are invalid: - placement.clusterSelector.clusterLabels\n - jobs[''step-id''].sparkJob.args'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Parameter name. 
The parameter name is used\n as the key, and paired with the parameter value, which are passed\n to the template when the template is instantiated. The name must\n contain only capital letters (A-Z), numbers (0-9), and underscores\n (_), and must not start with a number. The maximum length is 40\n characters.\n x-kubernetes-immutable: true\n validation:\n type: object\n x-dcl-go-name: Validation\n x-dcl-go-type: WorkflowTemplateParametersValidation\n description: Optional. Validation rules to be applied to this parameter's\n value.\n x-kubernetes-immutable: true\n properties:\n regex:\n type: object\n x-dcl-go-name: Regex\n x-dcl-go-type: WorkflowTemplateParametersValidationRegex\n description: Validation based on regular expressions.\n x-kubernetes-immutable: true\n required:\n - regexes\n properties:\n regexes:\n type: array\n x-dcl-go-name: Regexes\n description: Required. RE2 regular expressions used to validate\n the parameter's value. The value must match the regex in\n its entirety (substring matches are not sufficient).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n values:\n type: object\n x-dcl-go-name: Values\n x-dcl-go-type: WorkflowTemplateParametersValidationValues\n description: Validation based on a list of allowed values.\n x-kubernetes-immutable: true\n required:\n - values\n properties:\n values:\n type: array\n x-dcl-go-name: Values\n description: Required. List of allowed values for the parameter.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n placement:\n type: object\n x-dcl-go-name: Placement\n x-dcl-go-type: WorkflowTemplatePlacement\n description: Required. 
WorkflowTemplate scheduling information.\n x-kubernetes-immutable: true\n properties:\n clusterSelector:\n type: object\n x-dcl-go-name: ClusterSelector\n x-dcl-go-type: WorkflowTemplatePlacementClusterSelector\n description: Optional. A selector that chooses target cluster for jobs\n based on metadata. The selector is evaluated at the time each job\n is submitted.\n x-kubernetes-immutable: true\n required:\n - clusterLabels\n properties:\n clusterLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ClusterLabels\n description: Required. The cluster labels. Cluster must have all\n labels to match.\n x-kubernetes-immutable: true\n zone:\n type: string\n x-dcl-go-name: Zone\n description: Optional. The zone where workflow process executes.\n This parameter does not affect the selection of the cluster. If\n unspecified, the zone of the first cluster matching the selector\n is used.\n x-kubernetes-immutable: true\n managedCluster:\n type: object\n x-dcl-go-name: ManagedCluster\n x-dcl-go-type: WorkflowTemplatePlacementManagedCluster\n description: A cluster that is managed by the workflow.\n x-kubernetes-immutable: true\n required:\n - clusterName\n - config\n properties:\n clusterName:\n type: string\n x-dcl-go-name: ClusterName\n description: Required. The cluster name prefix. A unique cluster\n name will be formed by appending a random suffix. The name must\n contain only lower-case letters (a-z), numbers (0-9), and hyphens\n (-). Must begin with a letter. Cannot begin or end with hyphen.\n Must consist of between 2 and 35 characters.\n x-kubernetes-immutable: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfig\n description: Required. 
The cluster configuration.\n x-kubernetes-immutable: true\n properties:\n autoscalingConfig:\n type: object\n x-dcl-go-name: AutoscalingConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig\n description: Optional. Autoscaling config for the policy associated\n with the cluster. Cluster does not autoscale if this field\n is unset.\n x-kubernetes-immutable: true\n properties:\n policy:\n type: string\n x-dcl-go-name: Policy\n description: 'Optional. The autoscaling policy used by the\n cluster. Only resource names including projectid and location\n (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n Note that the policy must be in the same project and Dataproc\n region.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Dataproc/AutoscalingPolicy\n field: name\n encryptionConfig:\n type: object\n x-dcl-go-name: EncryptionConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig\n description: Optional. Encryption settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n gcePdKmsKeyName:\n type: string\n x-dcl-go-name: GcePdKmsKeyName\n description: Optional. The Cloud KMS key name to use for\n PD disk encryption for all instances in the cluster.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n endpointConfig:\n type: object\n x-dcl-go-name: EndpointConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEndpointConfig\n description: Optional. Port/endpoint configuration for this\n cluster\n x-kubernetes-immutable: true\n properties:\n enableHttpPortAccess:\n type: boolean\n x-dcl-go-name: EnableHttpPortAccess\n description: Optional. If true, enable http access to specific\n ports on the cluster from external sources. 
Defaults to\n false.\n x-kubernetes-immutable: true\n httpPorts:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: HttpPorts\n readOnly: true\n description: Output only. The map of port descriptions to\n URLs. Will only be populated if enable_http_port_access\n is true.\n x-kubernetes-immutable: true\n gceClusterConfig:\n type: object\n x-dcl-go-name: GceClusterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig\n description: Optional. The shared Compute Engine config settings\n for all instances in a cluster.\n x-kubernetes-immutable: true\n properties:\n internalIPOnly:\n type: boolean\n x-dcl-go-name: InternalIPOnly\n description: Optional. If true, all instances in the cluster\n will only have internal IP addresses. By default, clusters\n are not restricted to internal IP addresses, and will\n have ephemeral external IP addresses assigned to each\n instance. This `internal_ip_only` restriction can only\n be enabled for subnetwork enabled networks, and all off-cluster\n dependencies must be configured to be accessible without\n external IP addresses.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n metadata:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Metadata\n description: The Compute Engine metadata entries to add\n to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: 'Optional. The Compute Engine network to be\n used for machine communications. Cannot be specified with\n subnetwork_uri. If neither `network_uri` nor `subnetwork_uri`\n is specified, the \"default\" network of the project is\n used, if it exists. Cannot be a \"Custom Subnet Network\"\n (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks)\n for more information). 
A full URL, partial URI, or short\n name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`\n * `projects/[project_id]/regions/global/default` * `default`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n nodeGroupAffinity:\n type: object\n x-dcl-go-name: NodeGroupAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity\n description: Optional. Node Group Affinity for sole-tenant\n clusters.\n x-kubernetes-immutable: true\n required:\n - nodeGroup\n properties:\n nodeGroup:\n type: string\n x-dcl-go-name: NodeGroup\n description: 'Required. The URI of a sole-tenant [node\n group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)\n that the cluster will be created on. A full URL, partial\n URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `node-group-1`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/NodeGroup\n field: selfLink\n privateIPv6GoogleAccess:\n type: string\n x-dcl-go-name: PrivateIPv6GoogleAccess\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum\n description: 'Optional. The type of IPv6 access for a cluster.\n Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED,\n INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL'\n x-kubernetes-immutable: true\n enum:\n - PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED\n - INHERIT_FROM_SUBNETWORK\n - OUTBOUND\n - BIDIRECTIONAL\n reservationAffinity:\n type: object\n x-dcl-go-name: ReservationAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity\n description: Optional. 
Reservation Affinity for consuming\n Zonal reservation.\n x-kubernetes-immutable: true\n properties:\n consumeReservationType:\n type: string\n x-dcl-go-name: ConsumeReservationType\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum\n description: 'Optional. Type of reservation to consume\n Possible values: TYPE_UNSPECIFIED, NO_RESERVATION,\n ANY_RESERVATION, SPECIFIC_RESERVATION'\n x-kubernetes-immutable: true\n enum:\n - TYPE_UNSPECIFIED\n - NO_RESERVATION\n - ANY_RESERVATION\n - SPECIFIC_RESERVATION\n key:\n type: string\n x-dcl-go-name: Key\n description: Optional. Corresponds to the label key\n of reservation resource.\n x-kubernetes-immutable: true\n values:\n type: array\n x-dcl-go-name: Values\n description: Optional. Corresponds to the label values\n of reservation resource.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)\n (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))\n used by Dataproc cluster VM instances to access Google\n Cloud Platform services. If not specified, the [Compute\n Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)\n is used.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n serviceAccountScopes:\n type: array\n x-dcl-go-name: ServiceAccountScopes\n description: 'Optional. The URIs of service account scopes\n to be included in Compute Engine instances. 
The following\n base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly\n * https://www.googleapis.com/auth/devstorage.read_write\n * https://www.googleapis.com/auth/logging.write If no\n scopes are specified, the following defaults are also\n provided: * https://www.googleapis.com/auth/bigquery *\n https://www.googleapis.com/auth/bigtable.admin.table *\n https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subnetwork:\n type: string\n x-dcl-go-name: Subnetwork\n description: 'Optional. The Compute Engine subnetwork to\n be used for machine communications. Cannot be specified\n with network_uri. A full URL, partial URI, or short name\n are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `sub0`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Subnetwork\n field: selfLink\n tags:\n type: array\n x-dcl-go-name: Tags\n description: The Compute Engine tags to add to all instances\n (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: string\n x-dcl-go-type: string\n zone:\n type: string\n x-dcl-go-name: Zone\n description: 'Optional. The zone where the Compute Engine\n cluster will be located. On a create request, it is required\n in the \"global\" region. If omitted in a non-global Dataproc\n region, the service will pick a zone in the corresponding\n Compute Engine region. On a get request, zone will always\n be present. A full URL, partial URI, or short name are\n valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`\n * `projects/[project_id]/zones/[zone]` * `us-central1-f`'\n x-kubernetes-immutable: true\n gkeClusterConfig:\n type: object\n x-dcl-go-name: GkeClusterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig\n description: Optional. BETA. The Kubernetes Engine config for\n Dataproc clusters deployed to Kubernetes. Setting this is\n considered mutually exclusive with Compute Engine-based options\n such as `gce_cluster_config`, `master_config`, `worker_config`,\n `secondary_worker_config`, and `autoscaling_config`.\n x-kubernetes-immutable: true\n properties:\n namespacedGkeDeploymentTarget:\n type: object\n x-dcl-go-name: NamespacedGkeDeploymentTarget\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget\n description: Optional. A target for the deployment.\n x-kubernetes-immutable: true\n properties:\n clusterNamespace:\n type: string\n x-dcl-go-name: ClusterNamespace\n description: Optional. A namespace within the GKE cluster\n to deploy into.\n x-kubernetes-immutable: true\n targetGkeCluster:\n type: string\n x-dcl-go-name: TargetGkeCluster\n description: 'Optional. The target GKE cluster to deploy\n to. Format: ''projects/{project}/locations/{location}/clusters/{cluster_id}'''\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Container/Cluster\n field: name\n initializationActions:\n type: array\n x-dcl-go-name: InitializationActions\n description: 'Optional. Commands to execute on each node after\n config is completed. By default, executables are run on master\n and all worker nodes. 
You can test a node''s `role` metadata\n to run an executable on a master or worker node, as shown\n below using `curl` (you can also use `wget`): ROLE=$(curl\n -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\n if [[ \"${ROLE}\" == ''Master'' ]]; then ... master specific\n actions ... else ... worker specific actions ... fi'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigInitializationActions\n properties:\n executableFile:\n type: string\n x-dcl-go-name: ExecutableFile\n description: Required. Cloud Storage URI of executable\n file.\n x-kubernetes-immutable: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Amount of time executable has to\n complete. Default is 10 minutes (see JSON representation\n of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n Cluster creation fails with an explanatory error message\n (the name of the executable that caused the error and\n the exceeded timeout period) if the executable is not\n completed at end of the timeout period.\n x-kubernetes-immutable: true\n lifecycleConfig:\n type: object\n x-dcl-go-name: LifecycleConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig\n description: Optional. Lifecycle setting for the cluster.\n x-kubernetes-immutable: true\n properties:\n autoDeleteTime:\n type: string\n format: date-time\n x-dcl-go-name: AutoDeleteTime\n description: Optional. The time when cluster will be auto-deleted\n (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n autoDeleteTtl:\n type: string\n x-dcl-go-name: AutoDeleteTtl\n description: Optional. 
The lifetime duration of cluster.\n The cluster will be auto-deleted at the end of this period.\n Minimum value is 10 minutes; maximum value is 14 days\n (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleDeleteTtl:\n type: string\n x-dcl-go-name: IdleDeleteTtl\n description: Optional. The duration to keep the cluster\n alive while idling (when no jobs are running). Passing\n this threshold will cause the cluster to be deleted. Minimum\n value is 5 minutes; maximum value is 14 days (see JSON\n representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleStartTime:\n type: string\n format: date-time\n x-dcl-go-name: IdleStartTime\n readOnly: true\n description: Output only. The time when cluster became idle\n (most recent job finished) and became eligible for deletion\n due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n masterConfig:\n type: object\n x-dcl-go-name: MasterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfig\n description: Optional. The Compute Engine config settings for\n the master instance in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n metastoreConfig:\n type: object\n x-dcl-go-name: MetastoreConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig\n description: Optional. Metastore configuration.\n x-kubernetes-immutable: true\n required:\n - dataprocMetastoreService\n properties:\n dataprocMetastoreService:\n type: string\n x-dcl-go-name: DataprocMetastoreService\n description: 'Required. Resource name of an existing Dataproc\n Metastore service. 
Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Metastore/Service\n field: selfLink\n secondaryWorkerConfig:\n type: object\n x-dcl-go-name: SecondaryWorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig\n description: Optional. The Compute Engine config settings for\n additional worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. 
See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. 
If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n securityConfig:\n type: object\n x-dcl-go-name: SecurityConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfig\n description: Optional. Security settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n kerberosConfig:\n type: object\n x-dcl-go-name: KerberosConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig\n description: Optional. Kerberos related configuration.\n x-kubernetes-immutable: true\n properties:\n crossRealmTrustAdminServer:\n type: string\n x-dcl-go-name: CrossRealmTrustAdminServer\n description: Optional. The admin server (IP or hostname)\n for the remote trusted realm in a cross realm trust\n relationship.\n x-kubernetes-immutable: true\n crossRealmTrustKdc:\n type: string\n x-dcl-go-name: CrossRealmTrustKdc\n description: Optional. The KDC (IP or hostname) for\n the remote trusted realm in a cross realm trust relationship.\n x-kubernetes-immutable: true\n crossRealmTrustRealm:\n type: string\n x-dcl-go-name: CrossRealmTrustRealm\n description: Optional. 
The remote realm the Dataproc\n on-cluster KDC will trust, should the user enable\n cross realm trust.\n x-kubernetes-immutable: true\n crossRealmTrustSharedPassword:\n type: string\n x-dcl-go-name: CrossRealmTrustSharedPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the shared password between\n the on-cluster Kerberos realm and the remote trusted\n realm, in a cross realm trust relationship.\n x-kubernetes-immutable: true\n enableKerberos:\n type: boolean\n x-dcl-go-name: EnableKerberos\n description: 'Optional. Flag to indicate whether to\n Kerberize the cluster (default: false). Set this field\n to true to enable Kerberos on a cluster.'\n x-kubernetes-immutable: true\n kdcDbKey:\n type: string\n x-dcl-go-name: KdcDbKey\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the master key of the KDC\n database.\n x-kubernetes-immutable: true\n keyPassword:\n type: string\n x-dcl-go-name: KeyPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided key. For the self-signed certificate, this\n password is generated by Dataproc.\n x-kubernetes-immutable: true\n keystore:\n type: string\n x-dcl-go-name: Keystore\n description: Optional. The Cloud Storage URI of the\n keystore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n keystorePassword:\n type: string\n x-dcl-go-name: KeystorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided keystore. For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n kmsKey:\n type: string\n x-dcl-go-name: KmsKey\n description: Optional. 
The uri of the KMS key used to\n encrypt various sensitive files.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n realm:\n type: string\n x-dcl-go-name: Realm\n description: Optional. The name of the on-cluster Kerberos\n realm. If not specified, the uppercased domain of\n hostnames will be the realm.\n x-kubernetes-immutable: true\n rootPrincipalPassword:\n type: string\n x-dcl-go-name: RootPrincipalPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the root principal password.\n x-kubernetes-immutable: true\n tgtLifetimeHours:\n type: integer\n format: int64\n x-dcl-go-name: TgtLifetimeHours\n description: Optional. The lifetime of the ticket granting\n ticket, in hours. If not specified, or user specifies\n 0, then default value 10 will be used.\n x-kubernetes-immutable: true\n truststore:\n type: string\n x-dcl-go-name: Truststore\n description: Optional. The Cloud Storage URI of the\n truststore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n truststorePassword:\n type: string\n x-dcl-go-name: TruststorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided truststore. For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n softwareConfig:\n type: object\n x-dcl-go-name: SoftwareConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig\n description: Optional. The config settings for software inside\n the cluster.\n x-kubernetes-immutable: true\n properties:\n imageVersion:\n type: string\n x-dcl-go-name: ImageVersion\n description: Optional. The version of software inside the\n cluster. 
It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),\n such as \"1.2\" (including a subminor version, such as \"1.2.29\"),\n or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).\n If unspecified, it defaults to the latest Debian version.\n x-kubernetes-immutable: true\n optionalComponents:\n type: array\n x-dcl-go-name: OptionalComponents\n description: Optional. The set of components to activate\n on the cluster.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum\n enum:\n - COMPONENT_UNSPECIFIED\n - ANACONDA\n - DOCKER\n - DRUID\n - FLINK\n - HBASE\n - HIVE_WEBHCAT\n - JUPYTER\n - KERBEROS\n - PRESTO\n - RANGER\n - SOLR\n - ZEPPELIN\n - ZOOKEEPER\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: 'Optional. The properties to set on daemon\n config files. Property keys are specified in `prefix:property`\n format, for example `core:hadoop.tmp.dir`. The following\n are supported prefixes and their mappings: * capacity-scheduler:\n `capacity-scheduler.xml` * core: `core-site.xml` * distcp:\n `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml`\n * mapred: `mapred-site.xml` * pig: `pig.properties` *\n spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For\n more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).'\n x-kubernetes-immutable: true\n stagingBucket:\n type: string\n x-dcl-go-name: StagingBucket\n description: Optional. 
A Cloud Storage bucket used to stage\n job dependencies, config files, and job driver console output.\n If you do not specify a staging bucket, Cloud Dataproc will\n determine a Cloud Storage location (US, ASIA, or EU) for your\n cluster's staging bucket according to the Compute Engine zone\n where your cluster is deployed, and then create and manage\n this project-level, per-location bucket (see [Dataproc staging\n bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).\n **This field requires a Cloud Storage bucket name, not a URI\n to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n tempBucket:\n type: string\n x-dcl-go-name: TempBucket\n description: Optional. A Cloud Storage bucket used to store\n ephemeral cluster and jobs data, such as Spark and MapReduce\n history files. If you do not specify a temp bucket, Dataproc\n will determine a Cloud Storage location (US, ASIA, or EU)\n for your cluster's temp bucket according to the Compute Engine\n zone where your cluster is deployed, and then create and manage\n this project-level, per-location bucket. The default bucket\n has a TTL of 90 days, but you can use any TTL (or none) if\n you specify a bucket. **This field requires a Cloud Storage\n bucket name, not a URI to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n workerConfig:\n type: object\n x-dcl-go-name: WorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfig\n description: Optional. The Compute Engine config settings for\n worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. 
The labels to associate with this cluster.\n Label keys must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62}\n Label values must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63}\n No more than 32 labels can be associated with a given cluster.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time template was last updated.\n x-kubernetes-immutable: true\n version:\n type: integer\n format: int64\n x-dcl-go-name: Version\n readOnly: true\n description: Output only. The current version of this workflow template.\n x-kubernetes-immutable: true\n") +var YAML_workflow_template = []byte("info:\n title: Dataproc/WorkflowTemplate\n description: The Dataproc WorkflowTemplate resource\n x-dcl-struct-name: WorkflowTemplate\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n apply:\n description: The function used to apply information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n delete:\n description: The function used to delete a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n deleteAll:\n description: The function used to delete all WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n 
type: string\n list:\n description: The function used to list information about many WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n WorkflowTemplate:\n title: WorkflowTemplate\n x-dcl-id: projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - placement\n - jobs\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time template was created.\n x-kubernetes-immutable: true\n dagTimeout:\n type: string\n x-dcl-go-name: DagTimeout\n description: Optional. Timeout duration for the DAG of jobs, expressed in\n seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\").\n The timer begins when the first job is submitted. If the workflow is running\n at the end of the timeout period, any remaining jobs are cancelled, the\n workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),\n the cluster is deleted.\n x-kubernetes-immutable: true\n jobs:\n type: array\n x-dcl-go-name: Jobs\n description: Required. The Directed Acyclic Graph of Jobs to submit.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateJobs\n required:\n - stepId\n properties:\n hadoopJob:\n type: object\n x-dcl-go-name: HadoopJob\n x-dcl-go-type: WorkflowTemplateJobsHadoopJob\n description: Optional. 
Job is a Hadoop job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n in the working directory of Hadoop drivers and tasks. Supported\n file types: .jar, .tar, .tar.gz, .tgz, or .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `-libjars` or `-Dfoo=bar`, that\n can be set as job properties, since a collision may occur that\n causes an incorrect job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS (Hadoop Compatible Filesystem) URIs\n of files to be copied to the working directory of Hadoop drivers\n and distributed tasks. Useful for naively parallel tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. Jar file URIs to add to the CLASSPATHs\n of the Hadoop driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsHadoopJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. 
This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n containing the class must be in the default CLASSPATH or specified\n in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: 'The HCFS URI of the jar file containing the main\n class. Examples: ''gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar''\n ''hdfs:/tmp/test-samples/custom-wordcount.jar'' ''file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Hadoop. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site and classes in user code.\n x-kubernetes-immutable: true\n hiveJob:\n type: object\n x-dcl-go-name: HiveJob\n x-dcl-go-type: WorkflowTemplateJobsHiveJob\n description: Optional. Job is a Hive job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Hive server and Hadoop MapReduce (MR) tasks. 
Can contain\n Hive SerDes and UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names and values,\n used to configure Hive. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains Hive queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsHiveJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Hive command: `SET name=\"value\";`).'\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The labels to associate with this job. 
Label\n keys must be between 1 and 63 characters long, and must conform\n to the following regular expression: p{Ll}p{Lo}{0,62} Label values\n must be between 1 and 63 characters long, and must conform to the\n following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than\n 32 labels can be associated with a given job.'\n x-kubernetes-immutable: true\n pigJob:\n type: object\n x-dcl-go-name: PigJob\n x-dcl-go-type: WorkflowTemplateJobsPigJob\n description: Optional. Job is a Pig job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain\n Pig UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPigJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Pig. 
Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains the Pig\n queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPigJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Pig command: `name=[value]`).'\n x-kubernetes-immutable: true\n prerequisiteStepIds:\n type: array\n x-dcl-go-name: PrerequisiteStepIds\n description: Optional. The optional list of prerequisite job step_ids.\n If not specified, the job will start at the beginning of workflow.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n prestoJob:\n type: object\n x-dcl-go-name: PrestoJob\n x-dcl-go-type: WorkflowTemplateJobsPrestoJob\n description: Optional. 
Job is a Presto job.\n x-kubernetes-immutable: true\n properties:\n clientTags:\n type: array\n x-dcl-go-name: ClientTags\n description: Optional. Presto client tags to attach to this query\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n outputFormat:\n type: string\n x-dcl-go-name: OutputFormat\n description: Optional. The format in which query output will be\n displayed. See the Presto documentation for supported output\n formats\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. 
A mapping of property names to values.\n Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html)\n Equivalent to using the --session flag in the Presto CLI\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n pysparkJob:\n type: object\n x-dcl-go-name: PysparkJob\n x-dcl-go-type: WorkflowTemplateJobsPysparkJob\n description: Optional. Job is a PySpark job.\n x-kubernetes-immutable: true\n required:\n - mainPythonFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. 
Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATHs\n of the Python driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPysparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainPythonFileUri:\n type: string\n x-dcl-go-name: MainPythonFileUri\n description: Required. The HCFS URI of the main Python file to\n use as the driver. Must be a .py file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure PySpark. Properties that conflict with values\n set by the Dataproc API may be overwritten. 
Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n pythonFileUris:\n type: array\n x-dcl-go-name: PythonFileUris\n description: 'Optional. HCFS file URIs of Python files to pass\n to the PySpark framework. Supported file types: .py, .egg, and\n .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scheduling:\n type: object\n x-dcl-go-name: Scheduling\n x-dcl-go-type: WorkflowTemplateJobsScheduling\n description: Optional. Job scheduling configuration.\n x-kubernetes-immutable: true\n properties:\n maxFailuresPerHour:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresPerHour\n description: Optional. Maximum number of times per hour a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. A job may be reported as\n thrashing if driver exits with non-zero code 4 times within\n 10 minute window. Maximum value is 10.\n x-kubernetes-immutable: true\n maxFailuresTotal:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresTotal\n description: Optional. Maximum number of times in total a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. Maximum value is 240.\n x-kubernetes-immutable: true\n sparkJob:\n type: object\n x-dcl-go-name: SparkJob\n x-dcl-go-type: WorkflowTemplateJobsSparkJob\n description: Optional. Job is a Spark job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. 
Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATHs\n of the Spark driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. 
The jar file\n that contains the class must be in the default CLASSPATH or\n specified in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: The HCFS URI of the jar file that contains the main\n class.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkRJob:\n type: object\n x-dcl-go-name: SparkRJob\n x-dcl-go-type: WorkflowTemplateJobsSparkRJob\n description: Optional. Job is a SparkR job.\n x-kubernetes-immutable: true\n required:\n - mainRFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. 
Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkRJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainRFileUri:\n type: string\n x-dcl-go-name: MainRFileUri\n description: Required. The HCFS URI of the main R file to use\n as the driver. Must be a .R file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure SparkR. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkSqlJob:\n type: object\n x-dcl-go-name: SparkSqlJob\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJob\n description: Optional. Job is a SparkSql job.\n x-kubernetes-immutable: true\n properties:\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to be added to the\n Spark CLASSPATH.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobLoggingConfig\n description: Optional. 
The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark SQL's SparkConf. Properties that conflict\n with values set by the Dataproc API may be overwritten.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. 
Mapping of query variable names to values\n (equivalent to the Spark SQL command: SET `name=\"value\";`).'\n x-kubernetes-immutable: true\n stepId:\n type: string\n x-dcl-go-name: StepId\n description: Required. The step id. The id must be unique among all\n jobs within the template. The step id is used as prefix for job\n id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds\n field from other steps. The id must contain only letters (a-z, A-Z),\n numbers (0-9), underscores (_), and hyphens (-). Cannot begin or\n end with underscore or hyphen. Must consist of between 3 and 50\n characters.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. The labels to associate with this template. These\n labels will be propagated to all jobs and clusters created by the workflow\n instance. Label **keys** must contain 1 to 63 characters, and must conform\n to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values**\n may be empty, but, if present, must contain 1 to 63 characters, and must\n conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than\n 32 labels can be associated with a template.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The resource name of the workflow template, as\n described in https://cloud.google.com/apis/design/resource_names. 
* For\n `projects.regions.workflowTemplates`, the resource name of the template\n has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`\n * For `projects.locations.workflowTemplates`, the resource name of the\n template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`'\n x-kubernetes-immutable: true\n parameters:\n type: array\n x-dcl-go-name: Parameters\n description: Optional. Template parameters whose values are substituted\n into the template. Values for parameters must be provided when the template\n is instantiated.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateParameters\n required:\n - name\n - fields\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Brief description of the parameter. Must not\n exceed 1024 characters.\n x-kubernetes-immutable: true\n fields:\n type: array\n x-dcl-go-name: Fields\n description: 'Required. Paths to all fields that the parameter replaces.\n A field is allowed to appear in at most one parameter''s list of\n field paths. 
A field path is similar in syntax to a google.protobuf.FieldMask.\n For example, a field path that references the zone field of a workflow\n template''s cluster selector would be specified as `placement.clusterSelector.zone`.\n Also, field paths can reference fields using the following syntax:\n * Values in maps can be referenced by key: * labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * placement.managedCluster.labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * jobs[''step-id''].labels[''key''] * Jobs in the jobs list can\n be referenced by step-id: * jobs[''step-id''].hadoopJob.mainJarFileUri\n * jobs[''step-id''].hiveJob.queryFileUri * jobs[''step-id''].pySparkJob.mainPythonFileUri\n * jobs[''step-id''].hadoopJob.jarFileUris[0] * jobs[''step-id''].hadoopJob.archiveUris[0]\n * jobs[''step-id''].hadoopJob.fileUris[0] * jobs[''step-id''].pySparkJob.pythonFileUris[0]\n * Items in repeated fields can be referenced by a zero-based index:\n * jobs[''step-id''].sparkJob.args[0] * Other examples: * jobs[''step-id''].hadoopJob.properties[''key'']\n * jobs[''step-id''].hadoopJob.args[0] * jobs[''step-id''].hiveJob.scriptVariables[''key'']\n * jobs[''step-id''].hadoopJob.mainJarFileUri * placement.clusterSelector.zone\n It may not be possible to parameterize maps and repeated fields\n in their entirety since only individual map values and individual\n items in repeated fields can be referenced. For example, the following\n field paths are invalid: - placement.clusterSelector.clusterLabels\n - jobs[''step-id''].sparkJob.args'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Parameter name. The parameter name is used\n as the key, and paired with the parameter value, which are passed\n to the template when the template is instantiated. 
The name must\n contain only capital letters (A-Z), numbers (0-9), and underscores\n (_), and must not start with a number. The maximum length is 40\n characters.\n x-kubernetes-immutable: true\n validation:\n type: object\n x-dcl-go-name: Validation\n x-dcl-go-type: WorkflowTemplateParametersValidation\n description: Optional. Validation rules to be applied to this parameter's\n value.\n x-kubernetes-immutable: true\n properties:\n regex:\n type: object\n x-dcl-go-name: Regex\n x-dcl-go-type: WorkflowTemplateParametersValidationRegex\n description: Validation based on regular expressions.\n x-kubernetes-immutable: true\n required:\n - regexes\n properties:\n regexes:\n type: array\n x-dcl-go-name: Regexes\n description: Required. RE2 regular expressions used to validate\n the parameter's value. The value must match the regex in\n its entirety (substring matches are not sufficient).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n values:\n type: object\n x-dcl-go-name: Values\n x-dcl-go-type: WorkflowTemplateParametersValidationValues\n description: Validation based on a list of allowed values.\n x-kubernetes-immutable: true\n required:\n - values\n properties:\n values:\n type: array\n x-dcl-go-name: Values\n description: Required. List of allowed values for the parameter.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n placement:\n type: object\n x-dcl-go-name: Placement\n x-dcl-go-type: WorkflowTemplatePlacement\n description: Required. WorkflowTemplate scheduling information.\n x-kubernetes-immutable: true\n properties:\n clusterSelector:\n type: object\n x-dcl-go-name: ClusterSelector\n x-dcl-go-type: WorkflowTemplatePlacementClusterSelector\n description: Optional. A selector that chooses target cluster for jobs\n based on metadata. 
The selector is evaluated at the time each job\n is submitted.\n x-kubernetes-immutable: true\n required:\n - clusterLabels\n properties:\n clusterLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ClusterLabels\n description: Required. The cluster labels. Cluster must have all\n labels to match.\n x-kubernetes-immutable: true\n zone:\n type: string\n x-dcl-go-name: Zone\n description: Optional. The zone where workflow process executes.\n This parameter does not affect the selection of the cluster. If\n unspecified, the zone of the first cluster matching the selector\n is used.\n x-kubernetes-immutable: true\n managedCluster:\n type: object\n x-dcl-go-name: ManagedCluster\n x-dcl-go-type: WorkflowTemplatePlacementManagedCluster\n description: A cluster that is managed by the workflow.\n x-kubernetes-immutable: true\n required:\n - clusterName\n - config\n properties:\n clusterName:\n type: string\n x-dcl-go-name: ClusterName\n description: Required. The cluster name prefix. A unique cluster\n name will be formed by appending a random suffix. The name must\n contain only lower-case letters (a-z), numbers (0-9), and hyphens\n (-). Must begin with a letter. Cannot begin or end with hyphen.\n Must consist of between 2 and 35 characters.\n x-kubernetes-immutable: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfig\n description: Required. The cluster configuration.\n x-kubernetes-immutable: true\n properties:\n autoscalingConfig:\n type: object\n x-dcl-go-name: AutoscalingConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig\n description: Optional. Autoscaling config for the policy associated\n with the cluster. Cluster does not autoscale if this field\n is unset.\n x-kubernetes-immutable: true\n properties:\n policy:\n type: string\n x-dcl-go-name: Policy\n description: 'Optional. The autoscaling policy used by the\n cluster. 
Only resource names including projectid and location\n (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n Note that the policy must be in the same project and Dataproc\n region.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Dataproc/AutoscalingPolicy\n field: name\n encryptionConfig:\n type: object\n x-dcl-go-name: EncryptionConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig\n description: Optional. Encryption settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n gcePdKmsKeyName:\n type: string\n x-dcl-go-name: GcePdKmsKeyName\n description: Optional. The Cloud KMS key name to use for\n PD disk encryption for all instances in the cluster.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n endpointConfig:\n type: object\n x-dcl-go-name: EndpointConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEndpointConfig\n description: Optional. Port/endpoint configuration for this\n cluster\n x-kubernetes-immutable: true\n properties:\n enableHttpPortAccess:\n type: boolean\n x-dcl-go-name: EnableHttpPortAccess\n description: Optional. If true, enable http access to specific\n ports on the cluster from external sources. Defaults to\n false.\n x-kubernetes-immutable: true\n httpPorts:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: HttpPorts\n readOnly: true\n description: Output only. The map of port descriptions to\n URLs. Will only be populated if enable_http_port_access\n is true.\n x-kubernetes-immutable: true\n gceClusterConfig:\n type: object\n x-dcl-go-name: GceClusterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig\n description: Optional. 
The shared Compute Engine config settings\n for all instances in a cluster.\n x-kubernetes-immutable: true\n properties:\n internalIPOnly:\n type: boolean\n x-dcl-go-name: InternalIPOnly\n description: Optional. If true, all instances in the cluster\n will only have internal IP addresses. By default, clusters\n are not restricted to internal IP addresses, and will\n have ephemeral external IP addresses assigned to each\n instance. This `internal_ip_only` restriction can only\n be enabled for subnetwork enabled networks, and all off-cluster\n dependencies must be configured to be accessible without\n external IP addresses.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n metadata:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Metadata\n description: The Compute Engine metadata entries to add\n to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: 'Optional. The Compute Engine network to be\n used for machine communications. Cannot be specified with\n subnetwork_uri. If neither `network_uri` nor `subnetwork_uri`\n is specified, the \"default\" network of the project is\n used, if it exists. Cannot be a \"Custom Subnet Network\"\n (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks)\n for more information). A full URL, partial URI, or short\n name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`\n * `projects/[project_id]/regions/global/default` * `default`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n nodeGroupAffinity:\n type: object\n x-dcl-go-name: NodeGroupAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity\n description: Optional. 
Node Group Affinity for sole-tenant\n clusters.\n x-kubernetes-immutable: true\n required:\n - nodeGroup\n properties:\n nodeGroup:\n type: string\n x-dcl-go-name: NodeGroup\n description: 'Required. The URI of a sole-tenant [node\n group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)\n that the cluster will be created on. A full URL, partial\n URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `node-group-1`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/NodeGroup\n field: selfLink\n privateIPv6GoogleAccess:\n type: string\n x-dcl-go-name: PrivateIPv6GoogleAccess\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum\n description: 'Optional. The type of IPv6 access for a cluster.\n Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED,\n INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL'\n x-kubernetes-immutable: true\n enum:\n - PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED\n - INHERIT_FROM_SUBNETWORK\n - OUTBOUND\n - BIDIRECTIONAL\n reservationAffinity:\n type: object\n x-dcl-go-name: ReservationAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity\n description: Optional. Reservation Affinity for consuming\n Zonal reservation.\n x-kubernetes-immutable: true\n properties:\n consumeReservationType:\n type: string\n x-dcl-go-name: ConsumeReservationType\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum\n description: 'Optional. 
Type of reservation to consume\n Possible values: TYPE_UNSPECIFIED, NO_RESERVATION,\n ANY_RESERVATION, SPECIFIC_RESERVATION'\n x-kubernetes-immutable: true\n enum:\n - TYPE_UNSPECIFIED\n - NO_RESERVATION\n - ANY_RESERVATION\n - SPECIFIC_RESERVATION\n key:\n type: string\n x-dcl-go-name: Key\n description: Optional. Corresponds to the label key\n of reservation resource.\n x-kubernetes-immutable: true\n values:\n type: array\n x-dcl-go-name: Values\n description: Optional. Corresponds to the label values\n of reservation resource.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)\n (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))\n used by Dataproc cluster VM instances to access Google\n Cloud Platform services. If not specified, the [Compute\n Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)\n is used.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n serviceAccountScopes:\n type: array\n x-dcl-go-name: ServiceAccountScopes\n description: 'Optional. The URIs of service account scopes\n to be included in Compute Engine instances. 
The following\n base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly\n * https://www.googleapis.com/auth/devstorage.read_write\n * https://www.googleapis.com/auth/logging.write If no\n scopes are specified, the following defaults are also\n provided: * https://www.googleapis.com/auth/bigquery *\n https://www.googleapis.com/auth/bigtable.admin.table *\n https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subnetwork:\n type: string\n x-dcl-go-name: Subnetwork\n description: 'Optional. The Compute Engine subnetwork to\n be used for machine communications. Cannot be specified\n with network_uri. A full URL, partial URI, or short name\n are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `sub0`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Subnetwork\n field: selfLink\n tags:\n type: array\n x-dcl-go-name: Tags\n description: The Compute Engine tags to add to all instances\n (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: string\n x-dcl-go-type: string\n zone:\n type: string\n x-dcl-go-name: Zone\n description: 'Optional. The zone where the Compute Engine\n cluster will be located. On a create request, it is required\n in the \"global\" region. If omitted in a non-global Dataproc\n region, the service will pick a zone in the corresponding\n Compute Engine region. On a get request, zone will always\n be present. A full URL, partial URI, or short name are\n valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`\n * `projects/[project_id]/zones/[zone]` * `us-central1-f`'\n x-kubernetes-immutable: true\n gkeClusterConfig:\n type: object\n x-dcl-go-name: GkeClusterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig\n description: Optional. BETA. The Kubernetes Engine config for\n Dataproc clusters deployed to Kubernetes. Setting this is\n considered mutually exclusive with Compute Engine-based options\n such as `gce_cluster_config`, `master_config`, `worker_config`,\n `secondary_worker_config`, and `autoscaling_config`.\n x-kubernetes-immutable: true\n properties:\n namespacedGkeDeploymentTarget:\n type: object\n x-dcl-go-name: NamespacedGkeDeploymentTarget\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget\n description: Optional. A target for the deployment.\n x-kubernetes-immutable: true\n properties:\n clusterNamespace:\n type: string\n x-dcl-go-name: ClusterNamespace\n description: Optional. A namespace within the GKE cluster\n to deploy into.\n x-kubernetes-immutable: true\n targetGkeCluster:\n type: string\n x-dcl-go-name: TargetGkeCluster\n description: 'Optional. The target GKE cluster to deploy\n to. Format: ''projects/{project}/locations/{location}/clusters/{cluster_id}'''\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Container/Cluster\n field: name\n initializationActions:\n type: array\n x-dcl-go-name: InitializationActions\n description: 'Optional. Commands to execute on each node after\n config is completed. By default, executables are run on master\n and all worker nodes. 
You can test a node''s `role` metadata\n to run an executable on a master or worker node, as shown\n below using `curl` (you can also use `wget`): ROLE=$(curl\n -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\n if [[ \"${ROLE}\" == ''Master'' ]]; then ... master specific\n actions ... else ... worker specific actions ... fi'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigInitializationActions\n properties:\n executableFile:\n type: string\n x-dcl-go-name: ExecutableFile\n description: Required. Cloud Storage URI of executable\n file.\n x-kubernetes-immutable: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Amount of time executable has to\n complete. Default is 10 minutes (see JSON representation\n of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n Cluster creation fails with an explanatory error message\n (the name of the executable that caused the error and\n the exceeded timeout period) if the executable is not\n completed at end of the timeout period.\n x-kubernetes-immutable: true\n lifecycleConfig:\n type: object\n x-dcl-go-name: LifecycleConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig\n description: Optional. Lifecycle setting for the cluster.\n x-kubernetes-immutable: true\n properties:\n autoDeleteTime:\n type: string\n format: date-time\n x-dcl-go-name: AutoDeleteTime\n description: Optional. The time when cluster will be auto-deleted\n (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n autoDeleteTtl:\n type: string\n x-dcl-go-name: AutoDeleteTtl\n description: Optional. 
The lifetime duration of cluster.\n The cluster will be auto-deleted at the end of this period.\n Minimum value is 10 minutes; maximum value is 14 days\n (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleDeleteTtl:\n type: string\n x-dcl-go-name: IdleDeleteTtl\n description: Optional. The duration to keep the cluster\n alive while idling (when no jobs are running). Passing\n this threshold will cause the cluster to be deleted. Minimum\n value is 5 minutes; maximum value is 14 days (see JSON\n representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleStartTime:\n type: string\n format: date-time\n x-dcl-go-name: IdleStartTime\n readOnly: true\n description: Output only. The time when cluster became idle\n (most recent job finished) and became eligible for deletion\n due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n masterConfig:\n type: object\n x-dcl-go-name: MasterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfig\n description: Optional. The Compute Engine config settings for\n the master instance in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n metastoreConfig:\n type: object\n x-dcl-go-name: MetastoreConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig\n description: Optional. Metastore configuration.\n x-kubernetes-immutable: true\n required:\n - dataprocMetastoreService\n properties:\n dataprocMetastoreService:\n type: string\n x-dcl-go-name: DataprocMetastoreService\n description: 'Required. Resource name of an existing Dataproc\n Metastore service. 
Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Metastore/Service\n field: selfLink\n secondaryWorkerConfig:\n type: object\n x-dcl-go-name: SecondaryWorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig\n description: Optional. The Compute Engine config settings for\n additional worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. 
See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. 
If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n securityConfig:\n type: object\n x-dcl-go-name: SecurityConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfig\n description: Optional. Security settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n kerberosConfig:\n type: object\n x-dcl-go-name: KerberosConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig\n description: Optional. Kerberos related configuration.\n x-kubernetes-immutable: true\n properties:\n crossRealmTrustAdminServer:\n type: string\n x-dcl-go-name: CrossRealmTrustAdminServer\n description: Optional. The admin server (IP or hostname)\n for the remote trusted realm in a cross realm trust\n relationship.\n x-kubernetes-immutable: true\n crossRealmTrustKdc:\n type: string\n x-dcl-go-name: CrossRealmTrustKdc\n description: Optional. The KDC (IP or hostname) for\n the remote trusted realm in a cross realm trust relationship.\n x-kubernetes-immutable: true\n crossRealmTrustRealm:\n type: string\n x-dcl-go-name: CrossRealmTrustRealm\n description: Optional. 
The remote realm the Dataproc\n on-cluster KDC will trust, should the user enable\n cross realm trust.\n x-kubernetes-immutable: true\n crossRealmTrustSharedPassword:\n type: string\n x-dcl-go-name: CrossRealmTrustSharedPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the shared password between\n the on-cluster Kerberos realm and the remote trusted\n realm, in a cross realm trust relationship.\n x-kubernetes-immutable: true\n enableKerberos:\n type: boolean\n x-dcl-go-name: EnableKerberos\n description: 'Optional. Flag to indicate whether to\n Kerberize the cluster (default: false). Set this field\n to true to enable Kerberos on a cluster.'\n x-kubernetes-immutable: true\n kdcDbKey:\n type: string\n x-dcl-go-name: KdcDbKey\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the master key of the KDC\n database.\n x-kubernetes-immutable: true\n keyPassword:\n type: string\n x-dcl-go-name: KeyPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided key. For the self-signed certificate, this\n password is generated by Dataproc.\n x-kubernetes-immutable: true\n keystore:\n type: string\n x-dcl-go-name: Keystore\n description: Optional. The Cloud Storage URI of the\n keystore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n keystorePassword:\n type: string\n x-dcl-go-name: KeystorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided keystore. For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n kmsKey:\n type: string\n x-dcl-go-name: KmsKey\n description: Optional. 
The uri of the KMS key used to\n encrypt various sensitive files.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n realm:\n type: string\n x-dcl-go-name: Realm\n description: Optional. The name of the on-cluster Kerberos\n realm. If not specified, the uppercased domain of\n hostnames will be the realm.\n x-kubernetes-immutable: true\n rootPrincipalPassword:\n type: string\n x-dcl-go-name: RootPrincipalPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the root principal password.\n x-kubernetes-immutable: true\n tgtLifetimeHours:\n type: integer\n format: int64\n x-dcl-go-name: TgtLifetimeHours\n description: Optional. The lifetime of the ticket granting\n ticket, in hours. If not specified, or user specifies\n 0, then default value 10 will be used.\n x-kubernetes-immutable: true\n truststore:\n type: string\n x-dcl-go-name: Truststore\n description: Optional. The Cloud Storage URI of the\n truststore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n truststorePassword:\n type: string\n x-dcl-go-name: TruststorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided truststore. For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n softwareConfig:\n type: object\n x-dcl-go-name: SoftwareConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig\n description: Optional. The config settings for software inside\n the cluster.\n x-kubernetes-immutable: true\n properties:\n imageVersion:\n type: string\n x-dcl-go-name: ImageVersion\n description: Optional. The version of software inside the\n cluster. 
It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),\n such as \"1.2\" (including a subminor version, such as \"1.2.29\"),\n or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).\n If unspecified, it defaults to the latest Debian version.\n x-kubernetes-immutable: true\n optionalComponents:\n type: array\n x-dcl-go-name: OptionalComponents\n description: Optional. The set of components to activate\n on the cluster.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum\n enum:\n - COMPONENT_UNSPECIFIED\n - ANACONDA\n - DOCKER\n - DRUID\n - FLINK\n - HBASE\n - HIVE_WEBHCAT\n - JUPYTER\n - KERBEROS\n - PRESTO\n - RANGER\n - SOLR\n - ZEPPELIN\n - ZOOKEEPER\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: 'Optional. The properties to set on daemon\n config files. Property keys are specified in `prefix:property`\n format, for example `core:hadoop.tmp.dir`. The following\n are supported prefixes and their mappings: * capacity-scheduler:\n `capacity-scheduler.xml` * core: `core-site.xml` * distcp:\n `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml`\n * mapred: `mapred-site.xml` * pig: `pig.properties` *\n spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For\n more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).'\n x-kubernetes-immutable: true\n stagingBucket:\n type: string\n x-dcl-go-name: StagingBucket\n description: Optional. 
A Cloud Storage bucket used to stage\n job dependencies, config files, and job driver console output.\n If you do not specify a staging bucket, Cloud Dataproc will\n determine a Cloud Storage location (US, ASIA, or EU) for your\n cluster's staging bucket according to the Compute Engine zone\n where your cluster is deployed, and then create and manage\n this project-level, per-location bucket (see [Dataproc staging\n bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).\n **This field requires a Cloud Storage bucket name, not a URI\n to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n tempBucket:\n type: string\n x-dcl-go-name: TempBucket\n description: Optional. A Cloud Storage bucket used to store\n ephemeral cluster and jobs data, such as Spark and MapReduce\n history files. If you do not specify a temp bucket, Dataproc\n will determine a Cloud Storage location (US, ASIA, or EU)\n for your cluster's temp bucket according to the Compute Engine\n zone where your cluster is deployed, and then create and manage\n this project-level, per-location bucket. The default bucket\n has a TTL of 90 days, but you can use any TTL (or none) if\n you specify a bucket. **This field requires a Cloud Storage\n bucket name, not a URI to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n workerConfig:\n type: object\n x-dcl-go-name: WorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfig\n description: Optional. The Compute Engine config settings for\n worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. 
The labels to associate with this cluster.\n Label keys must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62}\n Label values must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63}\n No more than 32 labels can be associated with a given cluster.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time template was last updated.\n x-kubernetes-immutable: true\n version:\n type: integer\n format: int64\n x-dcl-go-name: Version\n readOnly: true\n description: Output only. The current version of this workflow template.\n x-kubernetes-immutable: true\n") -// 129144 bytes -// MD5: f1a8f3ed61de21a898ee0fcb46fc1170 +// 129140 bytes +// MD5: a94fac7581445bf365c300f60103ba27 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_schema.go index 3c6703c509..22eade9e49 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/beta/workflow_template_schema.go @@ -289,7 +289,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. 
Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", @@ -391,7 +391,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", @@ -502,7 +502,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", @@ -885,7 +885,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/cluster.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/cluster.go index eac6df102e..013b7d992a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/cluster.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/cluster.go @@ -1941,10 +1941,9 @@ func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.go index 195a7b67c6..5cab409821 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.go @@ -3169,10 +3169,9 @@ func (c *Client) GetWorkflowTemplate(ctx context.Context, r *WorkflowTemplate) ( if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = 
nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.yaml index cb88301ba3..fd60651d2f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template.yaml @@ -262,7 +262,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true @@ -367,7 +367,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true @@ -473,7 +473,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true @@ -851,7 +851,7 @@ components: need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet - that uses a QueryList to specify a HiveJob: "hiveJob": { + that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } }' x-kubernetes-immutable: true diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_schema.go index e2d229bfd8..885ef8d4b9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_schema.go @@ -289,7 +289,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", @@ -391,7 +391,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", @@ -502,7 +502,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", @@ -885,7 +885,7 @@ func DCLWorkflowTemplateSchema() *dcl.Schema { "queries": &dcl.Property{ Type: "array", GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Immutable: true, SendEmpty: true, ListType: "list", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_yaml_embed.go index 0fbf615650..d7a15b306f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/workflow_template_yaml_embed.go @@ -17,7 +17,7 @@ package dataproc // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/dataproc/workflow_template.yaml -var YAML_workflow_template = []byte("info:\n title: Dataproc/WorkflowTemplate\n description: 
The Dataproc WorkflowTemplate resource\n x-dcl-struct-name: WorkflowTemplate\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n apply:\n description: The function used to apply information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n delete:\n description: The function used to delete a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n deleteAll:\n description: The function used to delete all WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n WorkflowTemplate:\n title: WorkflowTemplate\n x-dcl-id: projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - placement\n - jobs\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time template was created.\n x-kubernetes-immutable: true\n dagTimeout:\n type: string\n x-dcl-go-name: DagTimeout\n description: Optional. 
Timeout duration for the DAG of jobs, expressed in\n seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\").\n The timer begins when the first job is submitted. If the workflow is running\n at the end of the timeout period, any remaining jobs are cancelled, the\n workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),\n the cluster is deleted.\n x-kubernetes-immutable: true\n jobs:\n type: array\n x-dcl-go-name: Jobs\n description: Required. The Directed Acyclic Graph of Jobs to submit.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateJobs\n required:\n - stepId\n properties:\n hadoopJob:\n type: object\n x-dcl-go-name: HadoopJob\n x-dcl-go-type: WorkflowTemplateJobsHadoopJob\n description: Optional. Job is a Hadoop job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n in the working directory of Hadoop drivers and tasks. Supported\n file types: .jar, .tar, .tar.gz, .tgz, or .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `-libjars` or `-Dfoo=bar`, that\n can be set as job properties, since a collision may occur that\n causes an incorrect job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. 
HCFS (Hadoop Compatible Filesystem) URIs\n of files to be copied to the working directory of Hadoop drivers\n and distributed tasks. Useful for naively parallel tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. Jar file URIs to add to the CLASSPATHs\n of the Hadoop driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsHadoopJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n containing the class must be in the default CLASSPATH or specified\n in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: 'The HCFS URI of the jar file containing the main\n class. Examples: ''gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar''\n ''hdfs:/tmp/test-samples/custom-wordcount.jar'' ''file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Hadoop. 
Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site and classes in user code.\n x-kubernetes-immutable: true\n hiveJob:\n type: object\n x-dcl-go-name: HiveJob\n x-dcl-go-type: WorkflowTemplateJobsHiveJob\n description: Optional. Job is a Hive job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Hive server and Hadoop MapReduce (MR) tasks. Can contain\n Hive SerDes and UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names and values,\n used to configure Hive. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains Hive queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsHiveJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. 
Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Hive command: `SET name=\"value\";`).'\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The labels to associate with this job. Label\n keys must be between 1 and 63 characters long, and must conform\n to the following regular expression: p{Ll}p{Lo}{0,62} Label values\n must be between 1 and 63 characters long, and must conform to the\n following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than\n 32 labels can be associated with a given job.'\n x-kubernetes-immutable: true\n pigJob:\n type: object\n x-dcl-go-name: PigJob\n x-dcl-go-type: WorkflowTemplateJobsPigJob\n description: Optional. Job is a Pig job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Pig Client and Hadoop MapReduce (MR) tasks. 
Can contain\n Pig UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPigJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Pig. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains the Pig\n queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPigJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Pig command: `name=[value]`).'\n x-kubernetes-immutable: true\n prerequisiteStepIds:\n type: array\n x-dcl-go-name: PrerequisiteStepIds\n description: Optional. The optional list of prerequisite job step_ids.\n If not specified, the job will start at the beginning of workflow.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n prestoJob:\n type: object\n x-dcl-go-name: PrestoJob\n x-dcl-go-type: WorkflowTemplateJobsPrestoJob\n description: Optional. Job is a Presto job.\n x-kubernetes-immutable: true\n properties:\n clientTags:\n type: array\n x-dcl-go-name: ClientTags\n description: Optional. Presto client tags to attach to this query\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobLoggingConfig\n description: Optional. 
The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n outputFormat:\n type: string\n x-dcl-go-name: OutputFormat\n description: Optional. The format in which query output will be\n displayed. See the Presto documentation for supported output\n formats\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values.\n Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html)\n Equivalent to using the --session flag in the Presto CLI\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n pysparkJob:\n type: object\n x-dcl-go-name: PysparkJob\n x-dcl-go-type: WorkflowTemplateJobsPysparkJob\n description: Optional. Job is a PySpark job.\n x-kubernetes-immutable: true\n required:\n - mainPythonFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. 
HCFS URIs of jar files to add to the CLASSPATHs\n of the Python driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPysparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainPythonFileUri:\n type: string\n x-dcl-go-name: MainPythonFileUri\n description: Required. The HCFS URI of the main Python file to\n use as the driver. Must be a .py file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure PySpark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n pythonFileUris:\n type: array\n x-dcl-go-name: PythonFileUris\n description: 'Optional. HCFS file URIs of Python files to pass\n to the PySpark framework. Supported file types: .py, .egg, and\n .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scheduling:\n type: object\n x-dcl-go-name: Scheduling\n x-dcl-go-type: WorkflowTemplateJobsScheduling\n description: Optional. 
Job scheduling configuration.\n x-kubernetes-immutable: true\n properties:\n maxFailuresPerHour:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresPerHour\n description: Optional. Maximum number of times per hour a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. A job may be reported as\n thrashing if driver exits with non-zero code 4 times within\n 10 minute window. Maximum value is 10.\n x-kubernetes-immutable: true\n maxFailuresTotal:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresTotal\n description: Optional. Maximum number of times in total a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. Maximum value is 240.\n x-kubernetes-immutable: true\n sparkJob:\n type: object\n x-dcl-go-name: SparkJob\n x-dcl-go-type: WorkflowTemplateJobsSparkJob\n description: Optional. Job is a Spark job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. 
Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATHs\n of the Spark driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n that contains the class must be in the default CLASSPATH or\n specified in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: The HCFS URI of the jar file that contains the main\n class.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkRJob:\n type: object\n x-dcl-go-name: SparkRJob\n x-dcl-go-type: WorkflowTemplateJobsSparkRJob\n description: Optional. 
Job is a SparkR job.\n x-kubernetes-immutable: true\n required:\n - mainRFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkRJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainRFileUri:\n type: string\n x-dcl-go-name: MainRFileUri\n description: Required. The HCFS URI of the main R file to use\n as the driver. 
Must be a .R file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure SparkR. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkSqlJob:\n type: object\n x-dcl-go-name: SparkSqlJob\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJob\n description: Optional. Job is a SparkSql job.\n x-kubernetes-immutable: true\n properties:\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to be added to the\n Spark CLASSPATH.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark SQL's SparkConf. 
Properties that conflict\n with values set by the Dataproc API may be overwritten.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\": {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Spark SQL command: SET `name=\"value\";`).'\n x-kubernetes-immutable: true\n stepId:\n type: string\n x-dcl-go-name: StepId\n description: Required. The step id. The id must be unique among all\n jobs within the template. The step id is used as prefix for job\n id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds\n field from other steps. The id must contain only letters (a-z, A-Z),\n numbers (0-9), underscores (_), and hyphens (-). Cannot begin or\n end with underscore or hyphen. Must consist of between 3 and 50\n characters.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
The labels to associate with this template. These\n labels will be propagated to all jobs and clusters created by the workflow\n instance. Label **keys** must contain 1 to 63 characters, and must conform\n to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values**\n may be empty, but, if present, must contain 1 to 63 characters, and must\n conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than\n 32 labels can be associated with a template.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The resource name of the workflow template, as\n described in https://cloud.google.com/apis/design/resource_names. * For\n `projects.regions.workflowTemplates`, the resource name of the template\n has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`\n * For `projects.locations.workflowTemplates`, the resource name of the\n template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`'\n x-kubernetes-immutable: true\n parameters:\n type: array\n x-dcl-go-name: Parameters\n description: Optional. Template parameters whose values are substituted\n into the template. Values for parameters must be provided when the template\n is instantiated.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateParameters\n required:\n - name\n - fields\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Brief description of the parameter. Must not\n exceed 1024 characters.\n x-kubernetes-immutable: true\n fields:\n type: array\n x-dcl-go-name: Fields\n description: 'Required. 
Paths to all fields that the parameter replaces.\n A field is allowed to appear in at most one parameter''s list of\n field paths. A field path is similar in syntax to a google.protobuf.FieldMask.\n For example, a field path that references the zone field of a workflow\n template''s cluster selector would be specified as `placement.clusterSelector.zone`.\n Also, field paths can reference fields using the following syntax:\n * Values in maps can be referenced by key: * labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * placement.managedCluster.labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * jobs[''step-id''].labels[''key''] * Jobs in the jobs list can\n be referenced by step-id: * jobs[''step-id''].hadoopJob.mainJarFileUri\n * jobs[''step-id''].hiveJob.queryFileUri * jobs[''step-id''].pySparkJob.mainPythonFileUri\n * jobs[''step-id''].hadoopJob.jarFileUris[0] * jobs[''step-id''].hadoopJob.archiveUris[0]\n * jobs[''step-id''].hadoopJob.fileUris[0] * jobs[''step-id''].pySparkJob.pythonFileUris[0]\n * Items in repeated fields can be referenced by a zero-based index:\n * jobs[''step-id''].sparkJob.args[0] * Other examples: * jobs[''step-id''].hadoopJob.properties[''key'']\n * jobs[''step-id''].hadoopJob.args[0] * jobs[''step-id''].hiveJob.scriptVariables[''key'']\n * jobs[''step-id''].hadoopJob.mainJarFileUri * placement.clusterSelector.zone\n It may not be possible to parameterize maps and repeated fields\n in their entirety since only individual map values and individual\n items in repeated fields can be referenced. For example, the following\n field paths are invalid: - placement.clusterSelector.clusterLabels\n - jobs[''step-id''].sparkJob.args'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Parameter name. 
The parameter name is used\n as the key, and paired with the parameter value, which are passed\n to the template when the template is instantiated. The name must\n contain only capital letters (A-Z), numbers (0-9), and underscores\n (_), and must not start with a number. The maximum length is 40\n characters.\n x-kubernetes-immutable: true\n validation:\n type: object\n x-dcl-go-name: Validation\n x-dcl-go-type: WorkflowTemplateParametersValidation\n description: Optional. Validation rules to be applied to this parameter's\n value.\n x-kubernetes-immutable: true\n properties:\n regex:\n type: object\n x-dcl-go-name: Regex\n x-dcl-go-type: WorkflowTemplateParametersValidationRegex\n description: Validation based on regular expressions.\n x-kubernetes-immutable: true\n required:\n - regexes\n properties:\n regexes:\n type: array\n x-dcl-go-name: Regexes\n description: Required. RE2 regular expressions used to validate\n the parameter's value. The value must match the regex in\n its entirety (substring matches are not sufficient).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n values:\n type: object\n x-dcl-go-name: Values\n x-dcl-go-type: WorkflowTemplateParametersValidationValues\n description: Validation based on a list of allowed values.\n x-kubernetes-immutable: true\n required:\n - values\n properties:\n values:\n type: array\n x-dcl-go-name: Values\n description: Required. List of allowed values for the parameter.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n placement:\n type: object\n x-dcl-go-name: Placement\n x-dcl-go-type: WorkflowTemplatePlacement\n description: Required. 
WorkflowTemplate scheduling information.\n x-kubernetes-immutable: true\n properties:\n clusterSelector:\n type: object\n x-dcl-go-name: ClusterSelector\n x-dcl-go-type: WorkflowTemplatePlacementClusterSelector\n description: Optional. A selector that chooses target cluster for jobs\n based on metadata. The selector is evaluated at the time each job\n is submitted.\n x-kubernetes-immutable: true\n required:\n - clusterLabels\n properties:\n clusterLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ClusterLabels\n description: Required. The cluster labels. Cluster must have all\n labels to match.\n x-kubernetes-immutable: true\n zone:\n type: string\n x-dcl-go-name: Zone\n description: Optional. The zone where workflow process executes.\n This parameter does not affect the selection of the cluster. If\n unspecified, the zone of the first cluster matching the selector\n is used.\n x-kubernetes-immutable: true\n managedCluster:\n type: object\n x-dcl-go-name: ManagedCluster\n x-dcl-go-type: WorkflowTemplatePlacementManagedCluster\n description: A cluster that is managed by the workflow.\n x-kubernetes-immutable: true\n required:\n - clusterName\n - config\n properties:\n clusterName:\n type: string\n x-dcl-go-name: ClusterName\n description: Required. The cluster name prefix. A unique cluster\n name will be formed by appending a random suffix. The name must\n contain only lower-case letters (a-z), numbers (0-9), and hyphens\n (-). Must begin with a letter. Cannot begin or end with hyphen.\n Must consist of between 2 and 35 characters.\n x-kubernetes-immutable: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfig\n description: Required. 
The cluster configuration.\n x-kubernetes-immutable: true\n properties:\n autoscalingConfig:\n type: object\n x-dcl-go-name: AutoscalingConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig\n description: Optional. Autoscaling config for the policy associated\n with the cluster. Cluster does not autoscale if this field\n is unset.\n x-kubernetes-immutable: true\n properties:\n policy:\n type: string\n x-dcl-go-name: Policy\n description: 'Optional. The autoscaling policy used by the\n cluster. Only resource names including projectid and location\n (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n Note that the policy must be in the same project and Dataproc\n region.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Dataproc/AutoscalingPolicy\n field: name\n encryptionConfig:\n type: object\n x-dcl-go-name: EncryptionConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig\n description: Optional. Encryption settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n gcePdKmsKeyName:\n type: string\n x-dcl-go-name: GcePdKmsKeyName\n description: Optional. The Cloud KMS key name to use for\n PD disk encryption for all instances in the cluster.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n endpointConfig:\n type: object\n x-dcl-go-name: EndpointConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEndpointConfig\n description: Optional. Port/endpoint configuration for this\n cluster\n x-kubernetes-immutable: true\n properties:\n enableHttpPortAccess:\n type: boolean\n x-dcl-go-name: EnableHttpPortAccess\n description: Optional. If true, enable http access to specific\n ports on the cluster from external sources. 
Defaults to\n false.\n x-kubernetes-immutable: true\n httpPorts:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: HttpPorts\n readOnly: true\n description: Output only. The map of port descriptions to\n URLs. Will only be populated if enable_http_port_access\n is true.\n x-kubernetes-immutable: true\n gceClusterConfig:\n type: object\n x-dcl-go-name: GceClusterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig\n description: Optional. The shared Compute Engine config settings\n for all instances in a cluster.\n x-kubernetes-immutable: true\n properties:\n internalIPOnly:\n type: boolean\n x-dcl-go-name: InternalIPOnly\n description: Optional. If true, all instances in the cluster\n will only have internal IP addresses. By default, clusters\n are not restricted to internal IP addresses, and will\n have ephemeral external IP addresses assigned to each\n instance. This `internal_ip_only` restriction can only\n be enabled for subnetwork enabled networks, and all off-cluster\n dependencies must be configured to be accessible without\n external IP addresses.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n metadata:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Metadata\n description: The Compute Engine metadata entries to add\n to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: 'Optional. The Compute Engine network to be\n used for machine communications. Cannot be specified with\n subnetwork_uri. If neither `network_uri` nor `subnetwork_uri`\n is specified, the \"default\" network of the project is\n used, if it exists. Cannot be a \"Custom Subnet Network\"\n (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks)\n for more information). 
A full URL, partial URI, or short\n name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`\n * `projects/[project_id]/regions/global/default` * `default`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n nodeGroupAffinity:\n type: object\n x-dcl-go-name: NodeGroupAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity\n description: Optional. Node Group Affinity for sole-tenant\n clusters.\n x-kubernetes-immutable: true\n required:\n - nodeGroup\n properties:\n nodeGroup:\n type: string\n x-dcl-go-name: NodeGroup\n description: 'Required. The URI of a sole-tenant [node\n group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)\n that the cluster will be created on. A full URL, partial\n URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `node-group-1`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/NodeGroup\n field: selfLink\n privateIPv6GoogleAccess:\n type: string\n x-dcl-go-name: PrivateIPv6GoogleAccess\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum\n description: 'Optional. The type of IPv6 access for a cluster.\n Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED,\n INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL'\n x-kubernetes-immutable: true\n enum:\n - PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED\n - INHERIT_FROM_SUBNETWORK\n - OUTBOUND\n - BIDIRECTIONAL\n reservationAffinity:\n type: object\n x-dcl-go-name: ReservationAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity\n description: Optional. 
Reservation Affinity for consuming\n Zonal reservation.\n x-kubernetes-immutable: true\n properties:\n consumeReservationType:\n type: string\n x-dcl-go-name: ConsumeReservationType\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum\n description: 'Optional. Type of reservation to consume\n Possible values: TYPE_UNSPECIFIED, NO_RESERVATION,\n ANY_RESERVATION, SPECIFIC_RESERVATION'\n x-kubernetes-immutable: true\n enum:\n - TYPE_UNSPECIFIED\n - NO_RESERVATION\n - ANY_RESERVATION\n - SPECIFIC_RESERVATION\n key:\n type: string\n x-dcl-go-name: Key\n description: Optional. Corresponds to the label key\n of reservation resource.\n x-kubernetes-immutable: true\n values:\n type: array\n x-dcl-go-name: Values\n description: Optional. Corresponds to the label values\n of reservation resource.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)\n (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))\n used by Dataproc cluster VM instances to access Google\n Cloud Platform services. If not specified, the [Compute\n Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)\n is used.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n serviceAccountScopes:\n type: array\n x-dcl-go-name: ServiceAccountScopes\n description: 'Optional. The URIs of service account scopes\n to be included in Compute Engine instances. 
The following\n base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly\n * https://www.googleapis.com/auth/devstorage.read_write\n * https://www.googleapis.com/auth/logging.write If no\n scopes are specified, the following defaults are also\n provided: * https://www.googleapis.com/auth/bigquery *\n https://www.googleapis.com/auth/bigtable.admin.table *\n https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subnetwork:\n type: string\n x-dcl-go-name: Subnetwork\n description: 'Optional. The Compute Engine subnetwork to\n be used for machine communications. Cannot be specified\n with network_uri. A full URL, partial URI, or short name\n are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `sub0`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Subnetwork\n field: selfLink\n tags:\n type: array\n x-dcl-go-name: Tags\n description: The Compute Engine tags to add to all instances\n (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: string\n x-dcl-go-type: string\n zone:\n type: string\n x-dcl-go-name: Zone\n description: 'Optional. The zone where the Compute Engine\n cluster will be located. On a create request, it is required\n in the \"global\" region. If omitted in a non-global Dataproc\n region, the service will pick a zone in the corresponding\n Compute Engine region. On a get request, zone will always\n be present. A full URL, partial URI, or short name are\n valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`\n * `projects/[project_id]/zones/[zone]` * `us-central1-f`'\n x-kubernetes-immutable: true\n initializationActions:\n type: array\n x-dcl-go-name: InitializationActions\n description: 'Optional. Commands to execute on each node after\n config is completed. By default, executables are run on master\n and all worker nodes. You can test a node''s `role` metadata\n to run an executable on a master or worker node, as shown\n below using `curl` (you can also use `wget`): ROLE=$(curl\n -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\n if [[ \"${ROLE}\" == ''Master'' ]]; then ... master specific\n actions ... else ... worker specific actions ... fi'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigInitializationActions\n properties:\n executableFile:\n type: string\n x-dcl-go-name: ExecutableFile\n description: Required. Cloud Storage URI of executable\n file.\n x-kubernetes-immutable: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Amount of time executable has to\n complete. Default is 10 minutes (see JSON representation\n of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n Cluster creation fails with an explanatory error message\n (the name of the executable that caused the error and\n the exceeded timeout period) if the executable is not\n completed at end of the timeout period.\n x-kubernetes-immutable: true\n lifecycleConfig:\n type: object\n x-dcl-go-name: LifecycleConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig\n description: Optional. 
Lifecycle setting for the cluster.\n x-kubernetes-immutable: true\n properties:\n autoDeleteTime:\n type: string\n format: date-time\n x-dcl-go-name: AutoDeleteTime\n description: Optional. The time when cluster will be auto-deleted\n (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n autoDeleteTtl:\n type: string\n x-dcl-go-name: AutoDeleteTtl\n description: Optional. The lifetime duration of cluster.\n The cluster will be auto-deleted at the end of this period.\n Minimum value is 10 minutes; maximum value is 14 days\n (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleDeleteTtl:\n type: string\n x-dcl-go-name: IdleDeleteTtl\n description: Optional. The duration to keep the cluster\n alive while idling (when no jobs are running). Passing\n this threshold will cause the cluster to be deleted. Minimum\n value is 5 minutes; maximum value is 14 days (see JSON\n representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleStartTime:\n type: string\n format: date-time\n x-dcl-go-name: IdleStartTime\n readOnly: true\n description: Output only. The time when cluster became idle\n (most recent job finished) and became eligible for deletion\n due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n masterConfig:\n type: object\n x-dcl-go-name: MasterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfig\n description: Optional. The Compute Engine config settings for\n the master instance in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n secondaryWorkerConfig:\n type: object\n x-dcl-go-name: SecondaryWorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig\n description: Optional. The Compute Engine config settings for\n additional worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n securityConfig:\n type: object\n x-dcl-go-name: SecurityConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfig\n description: Optional. Security settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n kerberosConfig:\n type: object\n x-dcl-go-name: KerberosConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig\n description: Optional. 
Kerberos related configuration.\n x-kubernetes-immutable: true\n properties:\n crossRealmTrustAdminServer:\n type: string\n x-dcl-go-name: CrossRealmTrustAdminServer\n description: Optional. The admin server (IP or hostname)\n for the remote trusted realm in a cross realm trust\n relationship.\n x-kubernetes-immutable: true\n crossRealmTrustKdc:\n type: string\n x-dcl-go-name: CrossRealmTrustKdc\n description: Optional. The KDC (IP or hostname) for\n the remote trusted realm in a cross realm trust relationship.\n x-kubernetes-immutable: true\n crossRealmTrustRealm:\n type: string\n x-dcl-go-name: CrossRealmTrustRealm\n description: Optional. The remote realm the Dataproc\n on-cluster KDC will trust, should the user enable\n cross realm trust.\n x-kubernetes-immutable: true\n crossRealmTrustSharedPassword:\n type: string\n x-dcl-go-name: CrossRealmTrustSharedPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the shared password between\n the on-cluster Kerberos realm and the remote trusted\n realm, in a cross realm trust relationship.\n x-kubernetes-immutable: true\n enableKerberos:\n type: boolean\n x-dcl-go-name: EnableKerberos\n description: 'Optional. Flag to indicate whether to\n Kerberize the cluster (default: false). Set this field\n to true to enable Kerberos on a cluster.'\n x-kubernetes-immutable: true\n kdcDbKey:\n type: string\n x-dcl-go-name: KdcDbKey\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the master key of the KDC\n database.\n x-kubernetes-immutable: true\n keyPassword:\n type: string\n x-dcl-go-name: KeyPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided key. For the self-signed certificate, this\n password is generated by Dataproc.\n x-kubernetes-immutable: true\n keystore:\n type: string\n x-dcl-go-name: Keystore\n description: Optional. 
The Cloud Storage URI of the\n keystore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n keystorePassword:\n type: string\n x-dcl-go-name: KeystorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided keystore. For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n kmsKey:\n type: string\n x-dcl-go-name: KmsKey\n description: Optional. The uri of the KMS key used to\n encrypt various sensitive files.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n realm:\n type: string\n x-dcl-go-name: Realm\n description: Optional. The name of the on-cluster Kerberos\n realm. If not specified, the uppercased domain of\n hostnames will be the realm.\n x-kubernetes-immutable: true\n rootPrincipalPassword:\n type: string\n x-dcl-go-name: RootPrincipalPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the root principal password.\n x-kubernetes-immutable: true\n tgtLifetimeHours:\n type: integer\n format: int64\n x-dcl-go-name: TgtLifetimeHours\n description: Optional. The lifetime of the ticket granting\n ticket, in hours. If not specified, or user specifies\n 0, then default value 10 will be used.\n x-kubernetes-immutable: true\n truststore:\n type: string\n x-dcl-go-name: Truststore\n description: Optional. The Cloud Storage URI of the\n truststore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n truststorePassword:\n type: string\n x-dcl-go-name: TruststorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided truststore. 
For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n softwareConfig:\n type: object\n x-dcl-go-name: SoftwareConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig\n description: Optional. The config settings for software inside\n the cluster.\n x-kubernetes-immutable: true\n properties:\n imageVersion:\n type: string\n x-dcl-go-name: ImageVersion\n description: Optional. The version of software inside the\n cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),\n such as \"1.2\" (including a subminor version, such as \"1.2.29\"),\n or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).\n If unspecified, it defaults to the latest Debian version.\n x-kubernetes-immutable: true\n optionalComponents:\n type: array\n x-dcl-go-name: OptionalComponents\n description: Optional. The set of components to activate\n on the cluster.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum\n enum:\n - COMPONENT_UNSPECIFIED\n - ANACONDA\n - DOCKER\n - DRUID\n - FLINK\n - HBASE\n - HIVE_WEBHCAT\n - JUPYTER\n - KERBEROS\n - PRESTO\n - RANGER\n - SOLR\n - ZEPPELIN\n - ZOOKEEPER\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: 'Optional. The properties to set on daemon\n config files. Property keys are specified in `prefix:property`\n format, for example `core:hadoop.tmp.dir`. 
The following\n are supported prefixes and their mappings: * capacity-scheduler:\n `capacity-scheduler.xml` * core: `core-site.xml` * distcp:\n `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml`\n * mapred: `mapred-site.xml` * pig: `pig.properties` *\n spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For\n more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).'\n x-kubernetes-immutable: true\n stagingBucket:\n type: string\n x-dcl-go-name: StagingBucket\n description: Optional. A Cloud Storage bucket used to stage\n job dependencies, config files, and job driver console output.\n If you do not specify a staging bucket, Cloud Dataproc will\n determine a Cloud Storage location (US, ASIA, or EU) for your\n cluster's staging bucket according to the Compute Engine zone\n where your cluster is deployed, and then create and manage\n this project-level, per-location bucket (see [Dataproc staging\n bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).\n **This field requires a Cloud Storage bucket name, not a URI\n to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n tempBucket:\n type: string\n x-dcl-go-name: TempBucket\n description: Optional. A Cloud Storage bucket used to store\n ephemeral cluster and jobs data, such as Spark and MapReduce\n history files. If you do not specify a temp bucket, Dataproc\n will determine a Cloud Storage location (US, ASIA, or EU)\n for your cluster's temp bucket according to the Compute Engine\n zone where your cluster is deployed, and then create and manage\n this project-level, per-location bucket. The default bucket\n has a TTL of 90 days, but you can use any TTL (or none) if\n you specify a bucket. 
**This field requires a Cloud Storage\n bucket name, not a URI to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n workerConfig:\n type: object\n x-dcl-go-name: WorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfig\n description: Optional. The Compute Engine config settings for\n worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. 
See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. 
If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The labels to associate with this cluster.\n Label keys must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62}\n Label values must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63}\n No more than 32 labels can be associated with a given cluster.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time template was last updated.\n x-kubernetes-immutable: true\n version:\n type: integer\n format: int64\n x-dcl-go-name: Version\n readOnly: true\n description: Output only. 
The current version of this workflow template.\n x-kubernetes-immutable: true\n") +var YAML_workflow_template = []byte("info:\n title: Dataproc/WorkflowTemplate\n description: The Dataproc WorkflowTemplate resource\n x-dcl-struct-name: WorkflowTemplate\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n apply:\n description: The function used to apply information about a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n delete:\n description: The function used to delete a WorkflowTemplate\n parameters:\n - name: WorkflowTemplate\n required: true\n description: A full instance of a WorkflowTemplate\n deleteAll:\n description: The function used to delete all WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many WorkflowTemplate\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n WorkflowTemplate:\n title: WorkflowTemplate\n x-dcl-id: projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - placement\n - jobs\n - project\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time template was created.\n x-kubernetes-immutable: true\n dagTimeout:\n type: string\n x-dcl-go-name: DagTimeout\n description: Optional. 
Timeout duration for the DAG of jobs, expressed in\n seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\").\n The timer begins when the first job is submitted. If the workflow is running\n at the end of the timeout period, any remaining jobs are cancelled, the\n workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),\n the cluster is deleted.\n x-kubernetes-immutable: true\n jobs:\n type: array\n x-dcl-go-name: Jobs\n description: Required. The Directed Acyclic Graph of Jobs to submit.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateJobs\n required:\n - stepId\n properties:\n hadoopJob:\n type: object\n x-dcl-go-name: HadoopJob\n x-dcl-go-type: WorkflowTemplateJobsHadoopJob\n description: Optional. Job is a Hadoop job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n in the working directory of Hadoop drivers and tasks. Supported\n file types: .jar, .tar, .tar.gz, .tgz, or .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `-libjars` or `-Dfoo=bar`, that\n can be set as job properties, since a collision may occur that\n causes an incorrect job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. 
HCFS (Hadoop Compatible Filesystem) URIs\n of files to be copied to the working directory of Hadoop drivers\n and distributed tasks. Useful for naively parallel tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. Jar file URIs to add to the CLASSPATHs\n of the Hadoop driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsHadoopJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n containing the class must be in the default CLASSPATH or specified\n in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: 'The HCFS URI of the jar file containing the main\n class. Examples: ''gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar''\n ''hdfs:/tmp/test-samples/custom-wordcount.jar'' ''file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Hadoop. 
Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site and classes in user code.\n x-kubernetes-immutable: true\n hiveJob:\n type: object\n x-dcl-go-name: HiveJob\n x-dcl-go-type: WorkflowTemplateJobsHiveJob\n description: Optional. Job is a Hive job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Hive server and Hadoop MapReduce (MR) tasks. Can contain\n Hive SerDes and UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names and values,\n used to configure Hive. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains Hive queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsHiveJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. 
Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Hive command: `SET name=\"value\";`).'\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The labels to associate with this job. Label\n keys must be between 1 and 63 characters long, and must conform\n to the following regular expression: p{Ll}p{Lo}{0,62} Label values\n must be between 1 and 63 characters long, and must conform to the\n following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than\n 32 labels can be associated with a given job.'\n x-kubernetes-immutable: true\n pigJob:\n type: object\n x-dcl-go-name: PigJob\n x-dcl-go-type: WorkflowTemplateJobsPigJob\n description: Optional. Job is a Pig job.\n x-kubernetes-immutable: true\n properties:\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATH\n of the Pig Client and Hadoop MapReduce (MR) tasks. 
Can contain\n Pig UDFs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPigJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Pig. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties,\n and classes in user code.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains the Pig\n queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPigJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Pig command: `name=[value]`).'\n x-kubernetes-immutable: true\n prerequisiteStepIds:\n type: array\n x-dcl-go-name: PrerequisiteStepIds\n description: Optional. The optional list of prerequisite job step_ids.\n If not specified, the job will start at the beginning of workflow.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n prestoJob:\n type: object\n x-dcl-go-name: PrestoJob\n x-dcl-go-type: WorkflowTemplateJobsPrestoJob\n description: Optional. Job is a Presto job.\n x-kubernetes-immutable: true\n properties:\n clientTags:\n type: array\n x-dcl-go-name: ClientTags\n description: Optional. Presto client tags to attach to this query\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n continueOnFailure:\n type: boolean\n x-dcl-go-name: ContinueOnFailure\n description: Optional. Whether to continue executing queries if\n a query fails. The default value is `false`. Setting to `true`\n can be useful when executing independent parallel queries.\n x-kubernetes-immutable: true\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobLoggingConfig\n description: Optional. 
The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n outputFormat:\n type: string\n x-dcl-go-name: OutputFormat\n description: Optional. The format in which query output will be\n displayed. See the Presto documentation for supported output\n formats\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values.\n Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html)\n Equivalent to using the --session flag in the Presto CLI\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsPrestoJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. 
Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n pysparkJob:\n type: object\n x-dcl-go-name: PysparkJob\n x-dcl-go-type: WorkflowTemplateJobsPysparkJob\n description: Optional. Job is a PySpark job.\n x-kubernetes-immutable: true\n required:\n - mainPythonFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. 
HCFS URIs of jar files to add to the CLASSPATHs\n of the Python driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsPysparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainPythonFileUri:\n type: string\n x-dcl-go-name: MainPythonFileUri\n description: Required. The HCFS URI of the main Python file to\n use as the driver. Must be a .py file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure PySpark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n pythonFileUris:\n type: array\n x-dcl-go-name: PythonFileUris\n description: 'Optional. HCFS file URIs of Python files to pass\n to the PySpark framework. Supported file types: .py, .egg, and\n .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scheduling:\n type: object\n x-dcl-go-name: Scheduling\n x-dcl-go-type: WorkflowTemplateJobsScheduling\n description: Optional. 
Job scheduling configuration.\n x-kubernetes-immutable: true\n properties:\n maxFailuresPerHour:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresPerHour\n description: Optional. Maximum number of times per hour a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. A job may be reported as\n thrashing if driver exits with non-zero code 4 times within\n 10 minute window. Maximum value is 10.\n x-kubernetes-immutable: true\n maxFailuresTotal:\n type: integer\n format: int64\n x-dcl-go-name: MaxFailuresTotal\n description: Optional. Maximum number of times in total a driver\n may be restarted as a result of driver exiting with non-zero\n code before job is reported failed. Maximum value is 240.\n x-kubernetes-immutable: true\n sparkJob:\n type: object\n x-dcl-go-name: SparkJob\n x-dcl-go-type: WorkflowTemplateJobsSparkJob\n description: Optional. Job is a Spark job.\n x-kubernetes-immutable: true\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. 
Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to add to the CLASSPATHs\n of the Spark driver and tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainClass:\n type: string\n x-dcl-go-name: MainClass\n description: The name of the driver's main class. The jar file\n that contains the class must be in the default CLASSPATH or\n specified in `jar_file_uris`.\n x-kubernetes-immutable: true\n mainJarFileUri:\n type: string\n x-dcl-go-name: MainJarFileUri\n description: The HCFS URI of the jar file that contains the main\n class.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkRJob:\n type: object\n x-dcl-go-name: SparkRJob\n x-dcl-go-type: WorkflowTemplateJobsSparkRJob\n description: Optional. 
Job is a SparkR job.\n x-kubernetes-immutable: true\n required:\n - mainRFileUri\n properties:\n archiveUris:\n type: array\n x-dcl-go-name: ArchiveUris\n description: 'Optional. HCFS URIs of archives to be extracted\n into the working directory of each executor. Supported file\n types: .jar, .tar, .tar.gz, .tgz, and .zip.'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional. The arguments to pass to the driver. Do\n not include arguments, such as `--conf`, that can be set as\n job properties, since a collision may occur that causes an incorrect\n job submission.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n fileUris:\n type: array\n x-dcl-go-name: FileUris\n description: Optional. HCFS URIs of files to be placed in the\n working directory of each executor. Useful for naively parallel\n tasks.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkRJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n mainRFileUri:\n type: string\n x-dcl-go-name: MainRFileUri\n description: Required. The HCFS URI of the main R file to use\n as the driver. 
Must be a .R file.\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure SparkR. Properties that conflict with values\n set by the Dataproc API may be overwritten. Can include properties\n set in /etc/spark/conf/spark-defaults.conf and classes in user\n code.\n x-kubernetes-immutable: true\n sparkSqlJob:\n type: object\n x-dcl-go-name: SparkSqlJob\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJob\n description: Optional. Job is a SparkSql job.\n x-kubernetes-immutable: true\n properties:\n jarFileUris:\n type: array\n x-dcl-go-name: JarFileUris\n description: Optional. HCFS URIs of jar files to be added to the\n Spark CLASSPATH.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n loggingConfig:\n type: object\n x-dcl-go-name: LoggingConfig\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobLoggingConfig\n description: Optional. The runtime log config for job execution.\n x-kubernetes-immutable: true\n properties:\n driverLogLevels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DriverLogLevels\n description: 'The per-package log levels for the driver. This\n may include \"root\" package name to configure rootLogger.\n Examples: ''com.google = FATAL'', ''root = INFO'', ''org.apache\n = DEBUG'''\n x-kubernetes-immutable: true\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: Optional. A mapping of property names to values,\n used to configure Spark SQL's SparkConf. 
Properties that conflict\n with values set by the Dataproc API may be overwritten.\n x-kubernetes-immutable: true\n queryFileUri:\n type: string\n x-dcl-go-name: QueryFileUri\n description: The HCFS URI of the script that contains SQL queries.\n x-kubernetes-immutable: true\n queryList:\n type: object\n x-dcl-go-name: QueryList\n x-dcl-go-type: WorkflowTemplateJobsSparkSqlJobQueryList\n description: A list of queries.\n x-kubernetes-immutable: true\n required:\n - queries\n properties:\n queries:\n type: array\n x-dcl-go-name: Queries\n description: 'Required. The queries to execute. You do not\n need to end a query expression with a semicolon. Multiple\n queries can be specified in one string by separating each\n with a semicolon. Here is an example of a Dataproc API snippet\n that uses a QueryList to specify a HiveJob: \"hiveJob\" {\n \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\",\n ] } }'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n scriptVariables:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ScriptVariables\n description: 'Optional. Mapping of query variable names to values\n (equivalent to the Spark SQL command: SET `name=\"value\";`).'\n x-kubernetes-immutable: true\n stepId:\n type: string\n x-dcl-go-name: StepId\n description: Required. The step id. The id must be unique among all\n jobs within the template. The step id is used as prefix for job\n id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds\n field from other steps. The id must contain only letters (a-z, A-Z),\n numbers (0-9), underscores (_), and hyphens (-). Cannot begin or\n end with underscore or hyphen. Must consist of between 3 and 50\n characters.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
The labels to associate with this template. These\n labels will be propagated to all jobs and clusters created by the workflow\n instance. Label **keys** must contain 1 to 63 characters, and must conform\n to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values**\n may be empty, but, if present, must contain 1 to 63 characters, and must\n conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than\n 32 labels can be associated with a template.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The resource name of the workflow template, as\n described in https://cloud.google.com/apis/design/resource_names. * For\n `projects.regions.workflowTemplates`, the resource name of the template\n has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`\n * For `projects.locations.workflowTemplates`, the resource name of the\n template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`'\n x-kubernetes-immutable: true\n parameters:\n type: array\n x-dcl-go-name: Parameters\n description: Optional. Template parameters whose values are substituted\n into the template. Values for parameters must be provided when the template\n is instantiated.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplateParameters\n required:\n - name\n - fields\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Brief description of the parameter. Must not\n exceed 1024 characters.\n x-kubernetes-immutable: true\n fields:\n type: array\n x-dcl-go-name: Fields\n description: 'Required. 
Paths to all fields that the parameter replaces.\n A field is allowed to appear in at most one parameter''s list of\n field paths. A field path is similar in syntax to a google.protobuf.FieldMask.\n For example, a field path that references the zone field of a workflow\n template''s cluster selector would be specified as `placement.clusterSelector.zone`.\n Also, field paths can reference fields using the following syntax:\n * Values in maps can be referenced by key: * labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * placement.managedCluster.labels[''key''] * placement.clusterSelector.clusterLabels[''key'']\n * jobs[''step-id''].labels[''key''] * Jobs in the jobs list can\n be referenced by step-id: * jobs[''step-id''].hadoopJob.mainJarFileUri\n * jobs[''step-id''].hiveJob.queryFileUri * jobs[''step-id''].pySparkJob.mainPythonFileUri\n * jobs[''step-id''].hadoopJob.jarFileUris[0] * jobs[''step-id''].hadoopJob.archiveUris[0]\n * jobs[''step-id''].hadoopJob.fileUris[0] * jobs[''step-id''].pySparkJob.pythonFileUris[0]\n * Items in repeated fields can be referenced by a zero-based index:\n * jobs[''step-id''].sparkJob.args[0] * Other examples: * jobs[''step-id''].hadoopJob.properties[''key'']\n * jobs[''step-id''].hadoopJob.args[0] * jobs[''step-id''].hiveJob.scriptVariables[''key'']\n * jobs[''step-id''].hadoopJob.mainJarFileUri * placement.clusterSelector.zone\n It may not be possible to parameterize maps and repeated fields\n in their entirety since only individual map values and individual\n items in repeated fields can be referenced. For example, the following\n field paths are invalid: - placement.clusterSelector.clusterLabels\n - jobs[''step-id''].sparkJob.args'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Parameter name. 
The parameter name is used\n as the key, and paired with the parameter value, which are passed\n to the template when the template is instantiated. The name must\n contain only capital letters (A-Z), numbers (0-9), and underscores\n (_), and must not start with a number. The maximum length is 40\n characters.\n x-kubernetes-immutable: true\n validation:\n type: object\n x-dcl-go-name: Validation\n x-dcl-go-type: WorkflowTemplateParametersValidation\n description: Optional. Validation rules to be applied to this parameter's\n value.\n x-kubernetes-immutable: true\n properties:\n regex:\n type: object\n x-dcl-go-name: Regex\n x-dcl-go-type: WorkflowTemplateParametersValidationRegex\n description: Validation based on regular expressions.\n x-kubernetes-immutable: true\n required:\n - regexes\n properties:\n regexes:\n type: array\n x-dcl-go-name: Regexes\n description: Required. RE2 regular expressions used to validate\n the parameter's value. The value must match the regex in\n its entirety (substring matches are not sufficient).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n values:\n type: object\n x-dcl-go-name: Values\n x-dcl-go-type: WorkflowTemplateParametersValidationValues\n description: Validation based on a list of allowed values.\n x-kubernetes-immutable: true\n required:\n - values\n properties:\n values:\n type: array\n x-dcl-go-name: Values\n description: Required. List of allowed values for the parameter.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n placement:\n type: object\n x-dcl-go-name: Placement\n x-dcl-go-type: WorkflowTemplatePlacement\n description: Required. 
WorkflowTemplate scheduling information.\n x-kubernetes-immutable: true\n properties:\n clusterSelector:\n type: object\n x-dcl-go-name: ClusterSelector\n x-dcl-go-type: WorkflowTemplatePlacementClusterSelector\n description: Optional. A selector that chooses target cluster for jobs\n based on metadata. The selector is evaluated at the time each job\n is submitted.\n x-kubernetes-immutable: true\n required:\n - clusterLabels\n properties:\n clusterLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: ClusterLabels\n description: Required. The cluster labels. Cluster must have all\n labels to match.\n x-kubernetes-immutable: true\n zone:\n type: string\n x-dcl-go-name: Zone\n description: Optional. The zone where workflow process executes.\n This parameter does not affect the selection of the cluster. If\n unspecified, the zone of the first cluster matching the selector\n is used.\n x-kubernetes-immutable: true\n managedCluster:\n type: object\n x-dcl-go-name: ManagedCluster\n x-dcl-go-type: WorkflowTemplatePlacementManagedCluster\n description: A cluster that is managed by the workflow.\n x-kubernetes-immutable: true\n required:\n - clusterName\n - config\n properties:\n clusterName:\n type: string\n x-dcl-go-name: ClusterName\n description: Required. The cluster name prefix. A unique cluster\n name will be formed by appending a random suffix. The name must\n contain only lower-case letters (a-z), numbers (0-9), and hyphens\n (-). Must begin with a letter. Cannot begin or end with hyphen.\n Must consist of between 2 and 35 characters.\n x-kubernetes-immutable: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfig\n description: Required. 
The cluster configuration.\n x-kubernetes-immutable: true\n properties:\n autoscalingConfig:\n type: object\n x-dcl-go-name: AutoscalingConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig\n description: Optional. Autoscaling config for the policy associated\n with the cluster. Cluster does not autoscale if this field\n is unset.\n x-kubernetes-immutable: true\n properties:\n policy:\n type: string\n x-dcl-go-name: Policy\n description: 'Optional. The autoscaling policy used by the\n cluster. Only resource names including projectid and location\n (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`\n Note that the policy must be in the same project and Dataproc\n region.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Dataproc/AutoscalingPolicy\n field: name\n encryptionConfig:\n type: object\n x-dcl-go-name: EncryptionConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig\n description: Optional. Encryption settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n gcePdKmsKeyName:\n type: string\n x-dcl-go-name: GcePdKmsKeyName\n description: Optional. The Cloud KMS key name to use for\n PD disk encryption for all instances in the cluster.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n endpointConfig:\n type: object\n x-dcl-go-name: EndpointConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigEndpointConfig\n description: Optional. Port/endpoint configuration for this\n cluster\n x-kubernetes-immutable: true\n properties:\n enableHttpPortAccess:\n type: boolean\n x-dcl-go-name: EnableHttpPortAccess\n description: Optional. If true, enable http access to specific\n ports on the cluster from external sources. 
Defaults to\n false.\n x-kubernetes-immutable: true\n httpPorts:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: HttpPorts\n readOnly: true\n description: Output only. The map of port descriptions to\n URLs. Will only be populated if enable_http_port_access\n is true.\n x-kubernetes-immutable: true\n gceClusterConfig:\n type: object\n x-dcl-go-name: GceClusterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig\n description: Optional. The shared Compute Engine config settings\n for all instances in a cluster.\n x-kubernetes-immutable: true\n properties:\n internalIPOnly:\n type: boolean\n x-dcl-go-name: InternalIPOnly\n description: Optional. If true, all instances in the cluster\n will only have internal IP addresses. By default, clusters\n are not restricted to internal IP addresses, and will\n have ephemeral external IP addresses assigned to each\n instance. This `internal_ip_only` restriction can only\n be enabled for subnetwork enabled networks, and all off-cluster\n dependencies must be configured to be accessible without\n external IP addresses.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n metadata:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Metadata\n description: The Compute Engine metadata entries to add\n to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: 'Optional. The Compute Engine network to be\n used for machine communications. Cannot be specified with\n subnetwork_uri. If neither `network_uri` nor `subnetwork_uri`\n is specified, the \"default\" network of the project is\n used, if it exists. Cannot be a \"Custom Subnet Network\"\n (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks)\n for more information). 
A full URL, partial URI, or short\n name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`\n * `projects/[project_id]/regions/global/default` * `default`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n nodeGroupAffinity:\n type: object\n x-dcl-go-name: NodeGroupAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity\n description: Optional. Node Group Affinity for sole-tenant\n clusters.\n x-kubernetes-immutable: true\n required:\n - nodeGroup\n properties:\n nodeGroup:\n type: string\n x-dcl-go-name: NodeGroup\n description: 'Required. The URI of a sole-tenant [node\n group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)\n that the cluster will be created on. A full URL, partial\n URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`\n * `node-group-1`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/NodeGroup\n field: selfLink\n privateIPv6GoogleAccess:\n type: string\n x-dcl-go-name: PrivateIPv6GoogleAccess\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum\n description: 'Optional. The type of IPv6 access for a cluster.\n Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED,\n INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL'\n x-kubernetes-immutable: true\n enum:\n - PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED\n - INHERIT_FROM_SUBNETWORK\n - OUTBOUND\n - BIDIRECTIONAL\n reservationAffinity:\n type: object\n x-dcl-go-name: ReservationAffinity\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity\n description: Optional. 
Reservation Affinity for consuming\n Zonal reservation.\n x-kubernetes-immutable: true\n properties:\n consumeReservationType:\n type: string\n x-dcl-go-name: ConsumeReservationType\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum\n description: 'Optional. Type of reservation to consume\n Possible values: TYPE_UNSPECIFIED, NO_RESERVATION,\n ANY_RESERVATION, SPECIFIC_RESERVATION'\n x-kubernetes-immutable: true\n enum:\n - TYPE_UNSPECIFIED\n - NO_RESERVATION\n - ANY_RESERVATION\n - SPECIFIC_RESERVATION\n key:\n type: string\n x-dcl-go-name: Key\n description: Optional. Corresponds to the label key\n of reservation resource.\n x-kubernetes-immutable: true\n values:\n type: array\n x-dcl-go-name: Values\n description: Optional. Corresponds to the label values\n of reservation resource.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)\n (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))\n used by Dataproc cluster VM instances to access Google\n Cloud Platform services. If not specified, the [Compute\n Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)\n is used.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n serviceAccountScopes:\n type: array\n x-dcl-go-name: ServiceAccountScopes\n description: 'Optional. The URIs of service account scopes\n to be included in Compute Engine instances. 
The following\n base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly\n * https://www.googleapis.com/auth/devstorage.read_write\n * https://www.googleapis.com/auth/logging.write If no\n scopes are specified, the following defaults are also\n provided: * https://www.googleapis.com/auth/bigquery *\n https://www.googleapis.com/auth/bigtable.admin.table *\n https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subnetwork:\n type: string\n x-dcl-go-name: Subnetwork\n description: 'Optional. The Compute Engine subnetwork to\n be used for machine communications. Cannot be specified\n with network_uri. A full URL, partial URI, or short name\n are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `projects/[project_id]/regions/us-east1/subnetworks/sub0`\n * `sub0`'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Subnetwork\n field: selfLink\n tags:\n type: array\n x-dcl-go-name: Tags\n description: The Compute Engine tags to add to all instances\n (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: string\n x-dcl-go-type: string\n zone:\n type: string\n x-dcl-go-name: Zone\n description: 'Optional. The zone where the Compute Engine\n cluster will be located. On a create request, it is required\n in the \"global\" region. If omitted in a non-global Dataproc\n region, the service will pick a zone in the corresponding\n Compute Engine region. On a get request, zone will always\n be present. A full URL, partial URI, or short name are\n valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`\n * `projects/[project_id]/zones/[zone]` * `us-central1-f`'\n x-kubernetes-immutable: true\n initializationActions:\n type: array\n x-dcl-go-name: InitializationActions\n description: 'Optional. Commands to execute on each node after\n config is completed. By default, executables are run on master\n and all worker nodes. You can test a node''s `role` metadata\n to run an executable on a master or worker node, as shown\n below using `curl` (you can also use `wget`): ROLE=$(curl\n -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\n if [[ \"${ROLE}\" == ''Master'' ]]; then ... master specific\n actions ... else ... worker specific actions ... fi'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigInitializationActions\n properties:\n executableFile:\n type: string\n x-dcl-go-name: ExecutableFile\n description: Required. Cloud Storage URI of executable\n file.\n x-kubernetes-immutable: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Amount of time executable has to\n complete. Default is 10 minutes (see JSON representation\n of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n Cluster creation fails with an explanatory error message\n (the name of the executable that caused the error and\n the exceeded timeout period) if the executable is not\n completed at end of the timeout period.\n x-kubernetes-immutable: true\n lifecycleConfig:\n type: object\n x-dcl-go-name: LifecycleConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig\n description: Optional. 
Lifecycle setting for the cluster.\n x-kubernetes-immutable: true\n properties:\n autoDeleteTime:\n type: string\n format: date-time\n x-dcl-go-name: AutoDeleteTime\n description: Optional. The time when cluster will be auto-deleted\n (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n autoDeleteTtl:\n type: string\n x-dcl-go-name: AutoDeleteTtl\n description: Optional. The lifetime duration of cluster.\n The cluster will be auto-deleted at the end of this period.\n Minimum value is 10 minutes; maximum value is 14 days\n (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleDeleteTtl:\n type: string\n x-dcl-go-name: IdleDeleteTtl\n description: Optional. The duration to keep the cluster\n alive while idling (when no jobs are running). Passing\n this threshold will cause the cluster to be deleted. Minimum\n value is 5 minutes; maximum value is 14 days (see JSON\n representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n idleStartTime:\n type: string\n format: date-time\n x-dcl-go-name: IdleStartTime\n readOnly: true\n description: Output only. The time when cluster became idle\n (most recent job finished) and became eligible for deletion\n due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n x-kubernetes-immutable: true\n masterConfig:\n type: object\n x-dcl-go-name: MasterConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfig\n description: Optional. The Compute Engine config settings for\n the master instance in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n secondaryWorkerConfig:\n type: object\n x-dcl-go-name: SecondaryWorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig\n description: Optional. The Compute Engine config settings for\n additional worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. 
The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. 
Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. 
The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n securityConfig:\n type: object\n x-dcl-go-name: SecurityConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfig\n description: Optional. Security settings for the cluster.\n x-kubernetes-immutable: true\n properties:\n kerberosConfig:\n type: object\n x-dcl-go-name: KerberosConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig\n description: Optional. 
Kerberos related configuration.\n x-kubernetes-immutable: true\n properties:\n crossRealmTrustAdminServer:\n type: string\n x-dcl-go-name: CrossRealmTrustAdminServer\n description: Optional. The admin server (IP or hostname)\n for the remote trusted realm in a cross realm trust\n relationship.\n x-kubernetes-immutable: true\n crossRealmTrustKdc:\n type: string\n x-dcl-go-name: CrossRealmTrustKdc\n description: Optional. The KDC (IP or hostname) for\n the remote trusted realm in a cross realm trust relationship.\n x-kubernetes-immutable: true\n crossRealmTrustRealm:\n type: string\n x-dcl-go-name: CrossRealmTrustRealm\n description: Optional. The remote realm the Dataproc\n on-cluster KDC will trust, should the user enable\n cross realm trust.\n x-kubernetes-immutable: true\n crossRealmTrustSharedPassword:\n type: string\n x-dcl-go-name: CrossRealmTrustSharedPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the shared password between\n the on-cluster Kerberos realm and the remote trusted\n realm, in a cross realm trust relationship.\n x-kubernetes-immutable: true\n enableKerberos:\n type: boolean\n x-dcl-go-name: EnableKerberos\n description: 'Optional. Flag to indicate whether to\n Kerberize the cluster (default: false). Set this field\n to true to enable Kerberos on a cluster.'\n x-kubernetes-immutable: true\n kdcDbKey:\n type: string\n x-dcl-go-name: KdcDbKey\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the master key of the KDC\n database.\n x-kubernetes-immutable: true\n keyPassword:\n type: string\n x-dcl-go-name: KeyPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided key. For the self-signed certificate, this\n password is generated by Dataproc.\n x-kubernetes-immutable: true\n keystore:\n type: string\n x-dcl-go-name: Keystore\n description: Optional. 
The Cloud Storage URI of the\n keystore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n keystorePassword:\n type: string\n x-dcl-go-name: KeystorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided keystore. For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n kmsKey:\n type: string\n x-dcl-go-name: KmsKey\n description: Optional. The uri of the KMS key used to\n encrypt various sensitive files.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n realm:\n type: string\n x-dcl-go-name: Realm\n description: Optional. The name of the on-cluster Kerberos\n realm. If not specified, the uppercased domain of\n hostnames will be the realm.\n x-kubernetes-immutable: true\n rootPrincipalPassword:\n type: string\n x-dcl-go-name: RootPrincipalPassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the root principal password.\n x-kubernetes-immutable: true\n tgtLifetimeHours:\n type: integer\n format: int64\n x-dcl-go-name: TgtLifetimeHours\n description: Optional. The lifetime of the ticket granting\n ticket, in hours. If not specified, or user specifies\n 0, then default value 10 will be used.\n x-kubernetes-immutable: true\n truststore:\n type: string\n x-dcl-go-name: Truststore\n description: Optional. The Cloud Storage URI of the\n truststore file used for SSL encryption. If not provided,\n Dataproc will provide a self-signed certificate.\n x-kubernetes-immutable: true\n truststorePassword:\n type: string\n x-dcl-go-name: TruststorePassword\n description: Optional. The Cloud Storage URI of a KMS\n encrypted file containing the password to the user\n provided truststore. 
For the self-signed certificate,\n this password is generated by Dataproc.\n x-kubernetes-immutable: true\n softwareConfig:\n type: object\n x-dcl-go-name: SoftwareConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig\n description: Optional. The config settings for software inside\n the cluster.\n x-kubernetes-immutable: true\n properties:\n imageVersion:\n type: string\n x-dcl-go-name: ImageVersion\n description: Optional. The version of software inside the\n cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),\n such as \"1.2\" (including a subminor version, such as \"1.2.29\"),\n or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).\n If unspecified, it defaults to the latest Debian version.\n x-kubernetes-immutable: true\n optionalComponents:\n type: array\n x-dcl-go-name: OptionalComponents\n description: Optional. The set of components to activate\n on the cluster.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum\n enum:\n - COMPONENT_UNSPECIFIED\n - ANACONDA\n - DOCKER\n - DRUID\n - FLINK\n - HBASE\n - HIVE_WEBHCAT\n - JUPYTER\n - KERBEROS\n - PRESTO\n - RANGER\n - SOLR\n - ZEPPELIN\n - ZOOKEEPER\n properties:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Properties\n description: 'Optional. The properties to set on daemon\n config files. Property keys are specified in `prefix:property`\n format, for example `core:hadoop.tmp.dir`. 
The following\n are supported prefixes and their mappings: * capacity-scheduler:\n `capacity-scheduler.xml` * core: `core-site.xml` * distcp:\n `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml`\n * mapred: `mapred-site.xml` * pig: `pig.properties` *\n spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For\n more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).'\n x-kubernetes-immutable: true\n stagingBucket:\n type: string\n x-dcl-go-name: StagingBucket\n description: Optional. A Cloud Storage bucket used to stage\n job dependencies, config files, and job driver console output.\n If you do not specify a staging bucket, Cloud Dataproc will\n determine a Cloud Storage location (US, ASIA, or EU) for your\n cluster's staging bucket according to the Compute Engine zone\n where your cluster is deployed, and then create and manage\n this project-level, per-location bucket (see [Dataproc staging\n bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).\n **This field requires a Cloud Storage bucket name, not a URI\n to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n tempBucket:\n type: string\n x-dcl-go-name: TempBucket\n description: Optional. A Cloud Storage bucket used to store\n ephemeral cluster and jobs data, such as Spark and MapReduce\n history files. If you do not specify a temp bucket, Dataproc\n will determine a Cloud Storage location (US, ASIA, or EU)\n for your cluster's temp bucket according to the Compute Engine\n zone where your cluster is deployed, and then create and manage\n this project-level, per-location bucket. The default bucket\n has a TTL of 90 days, but you can use any TTL (or none) if\n you specify a bucket. 
**This field requires a Cloud Storage\n bucket name, not a URI to a Cloud Storage bucket.**\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n workerConfig:\n type: object\n x-dcl-go-name: WorkerConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfig\n description: Optional. The Compute Engine config settings for\n worker instances in a cluster.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n accelerators:\n type: array\n x-dcl-go-name: Accelerators\n description: Optional. The Compute Engine accelerator configuration\n for these instances.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators\n properties:\n acceleratorCount:\n type: integer\n format: int64\n x-dcl-go-name: AcceleratorCount\n description: The number of the accelerator cards of\n this type exposed to this instance.\n x-kubernetes-immutable: true\n acceleratorType:\n type: string\n x-dcl-go-name: AcceleratorType\n description: 'Full URL, partial URI, or short name\n of the accelerator type resource to expose to this\n instance. 
See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).\n Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`\n * `nvidia-tesla-k80` **Auto Zone Exception**: If\n you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the accelerator\n type resource, for example, `nvidia-tesla-k80`.'\n x-kubernetes-immutable: true\n diskConfig:\n type: object\n x-dcl-go-name: DiskConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig\n description: Optional. Disk option config settings.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n bootDiskSizeGb:\n type: integer\n format: int64\n x-dcl-go-name: BootDiskSizeGb\n description: Optional. Size in GB of the boot disk (default\n is 500GB).\n x-kubernetes-immutable: true\n bootDiskType:\n type: string\n x-dcl-go-name: BootDiskType\n description: 'Optional. Type of the boot disk (default\n is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent\n Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent\n Disk Solid State Drive), or \"pd-standard\" (Persistent\n Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).'\n x-kubernetes-immutable: true\n numLocalSsds:\n type: integer\n format: int64\n x-dcl-go-name: NumLocalSsds\n description: Optional. Number of attached SSDs, from\n 0 to 4 (default is 0). If SSDs are not attached, the\n boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html)\n data. 
If one or more SSDs are attached, this runtime\n bulk data is spread across them, and the boot disk\n contains only basic config and installed binaries.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Optional. The Compute Engine image resource\n used for cluster instances. The URI can represent an image\n or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`\n * `projects/[project_id]/global/images/[image-id]` * `image-id`\n Image family examples. Dataproc will use the most recent\n image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`\n * `projects/[project_id]/global/images/family/[custom-image-family-name]`\n If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version`\n or the system default.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Image\n field: selfLink\n instanceNames:\n type: array\n x-dcl-go-name: InstanceNames\n readOnly: true\n description: Output only. The list of instance names. Dataproc\n derives the names from `cluster_name`, `num_instances`,\n and the instance group.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n isPreemptible:\n type: boolean\n x-dcl-go-name: IsPreemptible\n readOnly: true\n description: Output only. Specifies that this instance group\n contains preemptible instances.\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: 'Optional. The Compute Engine machine type\n used for cluster instances. A full URL, partial URI, or\n short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`\n * `n1-standard-2` **Auto Zone Exception**: If you are\n using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)\n feature, you must use the short name of the machine type\n resource, for example, `n1-standard-2`.'\n x-kubernetes-immutable: true\n managedGroupConfig:\n type: object\n x-dcl-go-name: ManagedGroupConfig\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig\n readOnly: true\n description: Output only. The config for Compute Engine\n Instance Group Manager that manages this group. This is\n only used for preemptible instance groups.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n instanceGroupManagerName:\n type: string\n x-dcl-go-name: InstanceGroupManagerName\n readOnly: true\n description: Output only. The name of the Instance Group\n Manager for this group.\n x-kubernetes-immutable: true\n instanceTemplateName:\n type: string\n x-dcl-go-name: InstanceTemplateName\n readOnly: true\n description: Output only. The name of the Instance Template\n used for the Managed Instance Group.\n x-kubernetes-immutable: true\n minCpuPlatform:\n type: string\n x-dcl-go-name: MinCpuPlatform\n description: Optional. Specifies the minimum cpu platform\n for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n numInstances:\n type: integer\n format: int64\n x-dcl-go-name: NumInstances\n description: Optional. The number of VM instances in the\n instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)\n [master_config](#FIELDS.master_config) groups, **must\n be set to 3**. For standard cluster [master_config](#FIELDS.master_config)\n groups, **must be set to 1**.\n x-kubernetes-immutable: true\n preemptibility:\n type: string\n x-dcl-go-name: Preemptibility\n x-dcl-go-type: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum\n description: 'Optional. Specifies the preemptibility of\n the instance group. The default value for master and worker\n groups is `NON_PREEMPTIBLE`. This default cannot be changed.\n The default value for secondary instances is `PREEMPTIBLE`.\n Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE,\n PREEMPTIBLE'\n x-kubernetes-immutable: true\n enum:\n - PREEMPTIBILITY_UNSPECIFIED\n - NON_PREEMPTIBLE\n - PREEMPTIBLE\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The labels to associate with this cluster.\n Label keys must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62}\n Label values must be between 1 and 63 characters long, and must\n conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63}\n No more than 32 labels can be associated with a given cluster.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time template was last updated.\n x-kubernetes-immutable: true\n version:\n type: integer\n format: int64\n x-dcl-go-name: Version\n readOnly: true\n description: Output only. 
The current version of this workflow template.\n x-kubernetes-immutable: true\n") -// 126004 bytes -// MD5: 07b7a370f4f96b993f941cd8df6b5bc6 +// 126000 bytes +// MD5: afcf4c86be93ea2e17b30e9af07f94e4 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.go index 9e5ca12b08..e4a5e1bbb8 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.go @@ -10841,10 +10841,9 @@ func (c *Client) GetDeidentifyTemplate(ctx context.Context, r *DeidentifyTemplat if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.yaml index 09a55e3902..feec0b4cb5 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template.yaml @@ -900,7 +900,7 @@ components: to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. 
The following scheme defines - the format: {info type name}({surrogate character + the format {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is ''MY_TOKEN_INFO_TYPE'' and the surrogate is ''abc'', the full replacement value @@ -3113,7 +3113,7 @@ components: it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines - the format: {info type name}({surrogate + the format {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is ''MY_TOKEN_INFO_TYPE'' and the surrogate is ''abc'', the full @@ -5014,7 +5014,7 @@ components: to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines - the format: {info type name}({surrogate character + the format {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is ''MY_TOKEN_INFO_TYPE'' and the surrogate is ''abc'', the full replacement value diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_beta_yaml_embed.go index 495876068a..30c9fbb453 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/dlp/beta/deidentify_template.yaml -var YAML_deidentify_template = []byte("info:\n title: Dlp/DeidentifyTemplate\n description: The Dlp DeidentifyTemplate resource\n 
x-dcl-struct-name: DeidentifyTemplate\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a DeidentifyTemplate\n parameters:\n - name: DeidentifyTemplate\n required: true\n description: A full instance of a DeidentifyTemplate\n apply:\n description: The function used to apply information about a DeidentifyTemplate\n parameters:\n - name: DeidentifyTemplate\n required: true\n description: A full instance of a DeidentifyTemplate\n delete:\n description: The function used to delete a DeidentifyTemplate\n parameters:\n - name: DeidentifyTemplate\n required: true\n description: A full instance of a DeidentifyTemplate\n deleteAll:\n description: The function used to delete all DeidentifyTemplate\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many DeidentifyTemplate\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n DeidentifyTemplate:\n title: DeidentifyTemplate\n x-dcl-id: '{{parent}}/deidentifyTemplates/{{name}}'\n x-dcl-locations:\n - region\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - parent\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The creation timestamp of an inspectTemplate.\n x-kubernetes-immutable: true\n deidentifyConfig:\n type: object\n x-dcl-go-name: DeidentifyConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfig\n description: The core content of the template.\n properties:\n infoTypeTransformations:\n type: object\n x-dcl-go-name: InfoTypeTransformations\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformations\n description: Treat the dataset as free-form text and apply the same\n free text transformation everywhere.\n x-dcl-conflicts:\n - recordTransformations\n required:\n - transformations\n properties:\n transformations:\n type: array\n x-dcl-go-name: Transformations\n description: Required. Transformation for each infoType. Cannot\n specify more than one for a given infoType.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations\n required:\n - primitiveTransformation\n properties:\n infoTypes:\n type: array\n x-dcl-go-name: InfoTypes\n description: InfoTypes to apply the transformation to. An\n empty list will cause this transformation to apply to all\n findings that correspond to infoTypes that were requested\n in `InspectConfig`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either a\n name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending Cloud\n DLP results to Data Catalog, infoType names should\n conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n primitiveTransformation:\n type: object\n x-dcl-go-name: PrimitiveTransformation\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation\n description: Required. Primitive transformation to apply to\n the infoType.\n properties:\n bucketingConfig:\n type: object\n x-dcl-go-name: BucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig\n description: Bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n buckets:\n type: array\n x-dcl-go-name: Buckets\n description: Set of buckets. Ranges must be non-overlapping.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets\n required:\n - replacementValue\n properties:\n max:\n type: object\n x-dcl-go-name: Max\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax\n description: Upper bound of the range, exclusive;\n type must match min.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue\n description: date\n x-dcl-conflicts:\n 
- integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n 
timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. 
Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n min:\n type: object\n x-dcl-go-name: Min\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin\n description: Lower bound of the range, inclusive.\n Type should be the same as max if used.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. 
An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replacementValue:\n type: object\n x-dcl-go-name: ReplacementValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue\n description: Required. Replacement value for\n this bucket.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n characterMaskConfig:\n type: object\n x-dcl-go-name: CharacterMaskConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig\n description: Mask\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n charactersToIgnore:\n type: array\n x-dcl-go-name: CharactersToIgnore\n description: When masking a string, items in this\n list will be skipped when replacing characters.\n For example, if the input string is 
`555-555-5555`\n and you instruct Cloud DLP to skip `-` and mask\n 5 characters with `*`, Cloud DLP returns `***-**5-5555`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore\n properties:\n charactersToSkip:\n type: string\n x-dcl-go-name: CharactersToSkip\n description: Characters to not transform when\n masking.\n x-dcl-conflicts:\n - commonCharactersToIgnore\n commonCharactersToIgnore:\n type: string\n x-dcl-go-name: CommonCharactersToIgnore\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnoreEnum\n description: 'Common characters to not transform\n when masking. Useful to avoid removing punctuation.\n Possible values: COMMON_CHARS_TO_IGNORE_UNSPECIFIED,\n NUMERIC, ALPHA_UPPER_CASE, ALPHA_LOWER_CASE,\n PUNCTUATION, WHITESPACE'\n x-dcl-conflicts:\n - charactersToSkip\n enum:\n - COMMON_CHARS_TO_IGNORE_UNSPECIFIED\n - NUMERIC\n - ALPHA_UPPER_CASE\n - ALPHA_LOWER_CASE\n - PUNCTUATION\n - WHITESPACE\n maskingCharacter:\n type: string\n x-dcl-go-name: MaskingCharacter\n description: Character to use to mask the sensitive\n values—for example, `*` for an alphabetic string\n such as a name, or `0` for a numeric string such\n as ZIP code or credit card number. This string must\n have a length of 1. If not supplied, this value\n defaults to `*` for strings, and `0` for digits.\n numberToMask:\n type: integer\n format: int64\n x-dcl-go-name: NumberToMask\n description: Number of characters to mask. If not\n set, all matching chars will be masked. Skipped\n characters do not count towards this tally.\n reverseOrder:\n type: boolean\n x-dcl-go-name: ReverseOrder\n description: Mask characters in reverse order. 
For\n example, if `masking_character` is `0`, `number_to_mask`\n is `14`, and `reverse_order` is `false`, then the\n input string `1234-5678-9012-3456` is masked as\n `00000000000000-3456`. If `masking_character` is\n `*`, `number_to_mask` is `3`, and `reverse_order`\n is `true`, then the string `12345` is masked as\n `12***`.\n cryptoDeterministicConfig:\n type: object\n x-dcl-go-name: CryptoDeterministicConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig\n description: Deterministic Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext\n description: 'A context may be used for higher security\n and maintaining referential integrity such that\n the same identifier in two different contexts will\n be given a distinct surrogate. The context is appended\n to plaintext value being encrypted. On decryption\n the provided context is validated against the value\n used during encryption. If a context was provided\n during encryption, same context must be provided\n during decryption as well. If the context is not\n set, plaintext would be used as is for encryption.\n If the context is set but: 1. there is no record\n present when transforming a given value or 2. the\n field is not present when transforming a given value,\n plaintext would be used as is for encryption. 
Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey\n description: The key used by the encryption function.\n For deterministic encryption using AES-SIV, the\n provided key is internally expanded to 64 bytes\n prior to use.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType\n description: 'The custom info type to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom info type followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format: {info type name}({surrogate character\n count}):{surrogate} For example, if the name of\n custom info type is ''MY_TOKEN_INFO_TYPE'' and the\n surrogate is ''abc'', the full replacement value\n will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This annotation\n identifies the surrogate when inspecting content\n using the custom info type ''Surrogate''. This facilitates\n reversal of the surrogate when it occurs in free\n text. Note: For record transformations where the\n entire cell in a table is being transformed, surrogates\n are not mandatory. Surrogates are used to denote\n the location of the token and are necessary for\n re-identification in free form text. 
In order for\n inspection to work properly, the name of this info\n type must not occur naturally anywhere in your data;\n otherwise, inspection may either - reverse a surrogate\n that does not correspond to an actual identifier\n - be unable to parse the surrogate and result in\n an error Therefore, choose your custom info type\n name carefully after considering what your data\n looks like. One way to select a name that has a\n high chance of yielding reliable detection is to\n include one or more unicode characters that are\n highly improbable to exist in your data. For example,\n assuming your data is entered from a regular ASCII\n keyboard, the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n cryptoHashConfig:\n type: object\n x-dcl-go-name: CryptoHashConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig\n description: Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey\n description: The key used by the hash function.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. 
This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n cryptoReplaceFfxFpeConfig:\n type: object\n x-dcl-go-name: CryptoReplaceFfxFpeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig\n description: Ffx-Fpe\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - cryptoKey\n properties:\n commonAlphabet:\n type: string\n x-dcl-go-name: CommonAlphabet\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabetEnum\n description: 'Common alphabets. 
Possible values: FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED,\n NUMERIC, HEXADECIMAL, UPPER_CASE_ALPHA_NUMERIC,\n ALPHA_NUMERIC'\n x-dcl-conflicts:\n - customAlphabet\n - radix\n enum:\n - FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED\n - NUMERIC\n - HEXADECIMAL\n - UPPER_CASE_ALPHA_NUMERIC\n - ALPHA_NUMERIC\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext\n description: 'The ''tweak'', a context may be used\n for higher security since the same identifier in\n two different contexts won''t be given the same\n surrogate. If the context is not set, a default\n tweak will be used. If the context is set but: 1.\n there is no record present when transforming a given\n value or 1. the field is not present when transforming\n a given value, a default tweak will be used. Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s. Currently, the referenced field\n may be of value type integer or string. The tweak\n is constructed as a sequence of bytes in big endian\n byte order such that: - a 64 bit integer is encoded\n followed by a single byte of value 1 - a string\n is encoded in UTF-8 format followed by a single\n byte of value 2'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey\n description: Required. 
The key used by the encryption\n algorithm.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. 
A 128/192/256 bit key.\n customAlphabet:\n type: string\n x-dcl-go-name: CustomAlphabet\n description: 'This is supported by mapping these to\n the alphanumeric characters that the FFX mode natively\n supports. This happens before/after encryption/decryption.\n Each character listed must appear only once. Number\n of characters must be in the range [2, 95]. This\n must be encoded as ASCII. The order of characters\n does not matter. The full list of allowed characters\n is: ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n ~`!@#$%^&*()_-+={[}]|:;\"''<,>.?/``'\n x-dcl-conflicts:\n - commonAlphabet\n - radix\n radix:\n type: integer\n format: int64\n x-dcl-go-name: Radix\n description: The native way to select the alphabet.\n Must be in the range [2, 95].\n x-dcl-conflicts:\n - commonAlphabet\n - customAlphabet\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType\n description: 'The custom infoType to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom infoType followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format: info_type_name(surrogate_character_count):surrogate\n For example, if the name of custom infoType is ''MY_TOKEN_INFO_TYPE''\n and the surrogate is ''abc'', the full replacement\n value will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This\n annotation identifies the surrogate when inspecting\n content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\n This facilitates reversal of the surrogate when\n it occurs in free text. 
In order for inspection\n to work properly, the name of this infoType must\n not occur naturally anywhere in your data; otherwise,\n inspection may find a surrogate that does not correspond\n to an actual identifier. Therefore, choose your\n custom infoType name carefully after considering\n what your data looks like. One way to select a name\n that has a high chance of yielding reliable detection\n is to include one or more unicode characters that\n are highly improbable to exist in your data. For\n example, assuming your data is entered from a regular\n ASCII keyboard, the symbol with the hex code point\n 29DD might be used like so: ⧝MY_TOKEN_TYPE'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n dateShiftConfig:\n type: object\n x-dcl-go-name: DateShiftConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig\n description: Date Shift\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - cryptoDeterministicConfig\n required:\n - upperBoundDays\n - lowerBoundDays\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext\n description: Points to the field that contains the\n context, for example, an entity id. If set, must\n also set cryptoKey. 
If set, shift will be consistent\n for the given context.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey\n description: Causes the shift to be computed based\n on this key and the context. This results in the\n same shift for the same context and crypto_key.\n If set, must also set context. Can only be applied\n to table items.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n lowerBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: LowerBoundDays\n description: Required. For example, -5 means shift\n date to at most 5 days back in the past.\n upperBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: UpperBoundDays\n description: Required. Range of shift in days. Actual\n shift will be selected at random within this range\n (inclusive ends). Negative means shift to earlier\n in time. Must not be more than 365250 days (1000\n years) each direction. For example, 3 means shift\n date to at most 3 days into the future.\n fixedSizeBucketingConfig:\n type: object\n x-dcl-go-name: FixedSizeBucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig\n description: Fixed size bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - lowerBound\n - upperBound\n - bucketSize\n properties:\n bucketSize:\n type: number\n format: double\n x-dcl-go-name: BucketSize\n description: 'Required. Size of each bucket (except\n for minimum and maximum buckets). 
So if `lower_bound`\n = 10, `upper_bound` = 89, and `bucket_size` = 10,\n then the following buckets would be used: -10, 10-20,\n 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89,\n 89+. Precision up to 2 decimals works.'\n lowerBound:\n type: object\n x-dcl-go-name: LowerBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound\n description: Required. Lower bound value of buckets.\n All values less than `lower_bound` are grouped together\n into a single bucket; for example if `lower_bound`\n = 10, then all values less than 10 are replaced\n with the value \"-10\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n upperBound:\n type: object\n x-dcl-go-name: UpperBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound\n description: Required. Upper bound value of buckets.\n All values greater than upper_bound are grouped\n together into a single bucket; for example if `upper_bound`\n = 89, then all values greater than 89 are replaced\n with the value \"89+\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue\n 
description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n redactConfig:\n type: object\n x-dcl-go-name: RedactConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig\n description: Redact\n x-dcl-conflicts:\n - replaceConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n replaceConfig:\n type: object\n x-dcl-go-name: ReplaceConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig\n description: Replace with a specified value.\n x-dcl-conflicts:\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - 
bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n newValue:\n type: object\n x-dcl-go-name: NewValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue\n description: Value to replace it with.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replaceWithInfoTypeConfig:\n type: object\n x-dcl-go-name: ReplaceWithInfoTypeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig\n description: Replace with infotype\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n timePartConfig:\n type: object\n x-dcl-go-name: TimePartConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig\n description: Time extraction\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n partToExtract:\n type: string\n x-dcl-go-name: PartToExtract\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtractEnum\n description: 'The part of the time to keep. Possible\n values: TIME_PART_UNSPECIFIED, YEAR, MONTH, DAY_OF_MONTH,\n DAY_OF_WEEK, WEEK_OF_YEAR, HOUR_OF_DAY'\n enum:\n - TIME_PART_UNSPECIFIED\n - YEAR\n - MONTH\n - DAY_OF_MONTH\n - DAY_OF_WEEK\n - WEEK_OF_YEAR\n - HOUR_OF_DAY\n recordTransformations:\n type: object\n x-dcl-go-name: RecordTransformations\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformations\n description: Treat the dataset as structured. Transformations can be\n applied to specific locations within structured datasets, such as\n transforming a column within a table.\n x-dcl-conflicts:\n - infoTypeTransformations\n properties:\n fieldTransformations:\n type: array\n x-dcl-go-name: FieldTransformations\n description: Transform the record by applying various field transformations.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations\n required:\n - fields\n properties:\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition\n description: 'Only apply the transformation if the condition\n evaluates to true for the given `RecordCondition`. The conditions\n are allowed to reference fields that are not used in the\n actual transformation. Example Use Cases: - Apply a different\n bucket transformation to an age column if the zip code column\n for the same record is within a specific range. 
- Redact\n a field if the date of birth field is greater than 85.'\n properties:\n expressions:\n type: object\n x-dcl-go-name: Expressions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions\n description: An expression.\n properties:\n conditions:\n type: object\n x-dcl-go-name: Conditions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions\n description: Conditions to apply to the expression.\n properties:\n conditions:\n type: array\n x-dcl-go-name: Conditions\n description: A collection of conditions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions\n required:\n - field\n - operator\n properties:\n field:\n type: object\n x-dcl-go-name: Field\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField\n description: Required. Field within the\n record this condition is evaluated against.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n operator:\n type: string\n x-dcl-go-name: Operator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperatorEnum\n description: 'Required. Operator used to\n compare the field or infoType to the value.\n Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n value:\n type: object\n x-dcl-go-name: Value\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue\n description: Value to compare against. 
[Mandatory,\n except for `EXISTS` tests.]\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n logicalOperator:\n type: string\n x-dcl-go-name: LogicalOperator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperatorEnum\n description: 'The operator to apply to the result\n of conditions. Default and currently only supported\n value is `AND`. Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n fields:\n type: array\n x-dcl-go-name: Fields\n description: Required. Input field(s) to apply the transformation\n to. When you have columns that reference their position\n within a list, omit the index from the FieldId. FieldId\n name matching ignores the index. 
For example, instead of\n \"contact.nums[0].type\", use \"contact.nums.type\".\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n infoTypeTransformations:\n type: object\n x-dcl-go-name: InfoTypeTransformations\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformations\n description: Treat the contents of the field as free text,\n and selectively transform content that matches an `InfoType`.\n x-dcl-conflicts:\n - primitiveTransformation\n required:\n - transformations\n properties:\n transformations:\n type: array\n x-dcl-go-name: Transformations\n description: Required. Transformation for each infoType.\n Cannot specify more than one for a given infoType.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformations\n required:\n - primitiveTransformation\n properties:\n infoTypes:\n type: array\n x-dcl-go-name: InfoTypes\n description: InfoTypes to apply the transformation\n to. An empty list will cause this transformation\n to apply to all findings that correspond to infoTypes\n that were requested in `InspectConfig`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypes\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type.\n Either a name of your choosing when creating\n a CustomInfoType, or one of the names listed\n at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n primitiveTransformation:\n type: object\n x-dcl-go-name: PrimitiveTransformation\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformation\n description: Required. Primitive transformation\n to apply to the infoType.\n properties:\n bucketingConfig:\n type: object\n x-dcl-go-name: BucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig\n description: Bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n buckets:\n type: array\n x-dcl-go-name: Buckets\n description: Set of buckets. 
Ranges must\n be non-overlapping.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets\n required:\n - replacementValue\n properties:\n max:\n type: object\n x-dcl-go-name: Max\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax\n description: Upper bound of the range,\n exclusive; type must match min.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month.\n Must be from 1 to 31 and\n valid for the year and month,\n or 0 to specify a year by\n itself or a year and month\n where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year.\n Must be from 1 to 12, or\n 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date.\n Must be from 1 to 9999,\n or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValueEnum\n description: 'day of week Possible\n values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY,\n THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day\n in 24 hour format. Should\n be from 0 to 23. 
An API\n may choose to allow the\n value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour\n of day. Must be from 0 to\n 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of\n seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally\n be from 0 to 59. An API\n may allow the value 60 if\n it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n min:\n type: object\n x-dcl-go-name: Min\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin\n description: Lower bound of the range,\n inclusive. 
Type should be the same\n as max if used.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month.\n Must be from 1 to 31 and\n valid for the year and month,\n or 0 to specify a year by\n itself or a year and month\n where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year.\n Must be from 1 to 12, or\n 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date.\n Must be from 1 to 9999,\n or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValueEnum\n description: 'day of week Possible\n values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY,\n THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n 
description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day\n in 24 hour format. Should\n be from 0 to 23. An API\n may choose to allow the\n value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour\n of day. Must be from 0 to\n 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of\n seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally\n be from 0 to 59. 
An API\n may allow the value 60 if\n it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replacementValue:\n type: object\n x-dcl-go-name: ReplacementValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue\n description: Required. Replacement\n value for this bucket.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month.\n Must be from 1 to 31 and\n valid for the year and month,\n or 0 to specify a year by\n itself or a year and month\n where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year.\n Must be from 1 to 12, or\n 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date.\n Must be from 1 to 9999,\n or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValueEnum\n description: 'day of week Possible\n values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY,\n THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day\n in 24 hour format. Should\n be from 0 to 23. 
An API\n may choose to allow the\n value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour\n of day. Must be from 0 to\n 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of\n seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally\n be from 0 to 59. An API\n may allow the value 60 if\n it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n characterMaskConfig:\n type: object\n x-dcl-go-name: CharacterMaskConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig\n description: Mask\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n charactersToIgnore:\n type: array\n x-dcl-go-name: CharactersToIgnore\n description: When masking a string, items\n in this list will be skipped when replacing\n characters. 
For example, if the input\n string is `555-555-5555` and you instruct\n Cloud DLP to skip `-` and mask 5 characters\n with `*`, Cloud DLP returns `***-**5-5555`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore\n properties:\n charactersToSkip:\n type: string\n x-dcl-go-name: CharactersToSkip\n description: Characters to not transform\n when masking.\n x-dcl-conflicts:\n - commonCharactersToIgnore\n commonCharactersToIgnore:\n type: string\n x-dcl-go-name: CommonCharactersToIgnore\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnoreEnum\n description: 'Common characters to\n not transform when masking. Useful\n to avoid removing punctuation. Possible\n values: COMMON_CHARS_TO_IGNORE_UNSPECIFIED,\n NUMERIC, ALPHA_UPPER_CASE, ALPHA_LOWER_CASE,\n PUNCTUATION, WHITESPACE'\n x-dcl-conflicts:\n - charactersToSkip\n enum:\n - COMMON_CHARS_TO_IGNORE_UNSPECIFIED\n - NUMERIC\n - ALPHA_UPPER_CASE\n - ALPHA_LOWER_CASE\n - PUNCTUATION\n - WHITESPACE\n maskingCharacter:\n type: string\n x-dcl-go-name: MaskingCharacter\n description: Character to use to mask the\n sensitive values—for example, `*` for\n an alphabetic string such as a name, or\n `0` for a numeric string such as ZIP code\n or credit card number. This string must\n have a length of 1. If not supplied, this\n value defaults to `*` for strings, and\n `0` for digits.\n numberToMask:\n type: integer\n format: int64\n x-dcl-go-name: NumberToMask\n description: Number of characters to mask.\n If not set, all matching chars will be\n masked. 
Skipped characters do not count\n towards this tally.\n reverseOrder:\n type: boolean\n x-dcl-go-name: ReverseOrder\n description: Mask characters in reverse\n order. For example, if `masking_character`\n is `0`, `number_to_mask` is `14`, and\n `reverse_order` is `false`, then the input\n string `1234-5678-9012-3456` is masked\n as `00000000000000-3456`. If `masking_character`\n is `*`, `number_to_mask` is `3`, and `reverse_order`\n is `true`, then the string `12345` is\n masked as `12***`.\n cryptoDeterministicConfig:\n type: object\n x-dcl-go-name: CryptoDeterministicConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig\n description: Deterministic Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext\n description: 'A context may be used for\n higher security and maintaining referential\n integrity such that the same identifier\n in two different contexts will be given\n a distinct surrogate. The context is appended\n to plaintext value being encrypted. On\n decryption the provided context is validated\n against the value used during encryption.\n If a context was provided during encryption,\n same context must be provided during decryption\n as well. If the context is not set, plaintext\n would be used as is for encryption. If\n the context is set but: 1. there is no\n record present when transforming a given\n value or 2. 
the field is not present when\n transforming a given value, plaintext\n would be used as is for encryption. Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey\n description: The key used by the encryption\n function. For deterministic encryption\n using AES-SIV, the provided key is internally\n expanded to 64 bytes prior to use.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. 
This is an arbitrary\n string used to differentiate different\n keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256\n bit key.\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType\n description: 'The custom info type to annotate\n the surrogate with. This annotation will\n be applied to the surrogate by prefixing\n it with the name of the custom info type\n followed by the number of characters comprising\n the surrogate. The following scheme defines\n the format: {info type name}({surrogate\n character count}):{surrogate} For example,\n if the name of custom info type is ''MY_TOKEN_INFO_TYPE''\n and the surrogate is ''abc'', the full\n replacement value will be: ''MY_TOKEN_INFO_TYPE(3):abc''\n This annotation identifies the surrogate\n when inspecting content using the custom\n info type ''Surrogate''. This facilitates\n reversal of the surrogate when it occurs\n in free text. 
Note: For record transformations\n where the entire cell in a table is being\n transformed, surrogates are not mandatory.\n Surrogates are used to denote the location\n of the token and are necessary for re-identification\n in free form text. In order for inspection\n to work properly, the name of this info\n type must not occur naturally anywhere\n in your data; otherwise, inspection may\n either - reverse a surrogate that does\n not correspond to an actual identifier\n - be unable to parse the surrogate and\n result in an error Therefore, choose your\n custom info type name carefully after\n considering what your data looks like.\n One way to select a name that has a high\n chance of yielding reliable detection\n is to include one or more unicode characters\n that are highly improbable to exist in\n your data. For example, assuming your\n data is entered from a regular ASCII keyboard,\n the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information\n type. Either a name of your choosing\n when creating a CustomInfoType, or\n one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When\n sending Cloud DLP results to Data\n Catalog, infoType names should conform\n to the pattern `[A-Za-z0-9$-_]{1,64}`.\n cryptoHashConfig:\n type: object\n x-dcl-go-name: CryptoHashConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig\n description: Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey\n description: The key used by the hash function.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. 
The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. This is an arbitrary\n string used to differentiate different\n keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. 
A 128/192/256\n bit key.\n cryptoReplaceFfxFpeConfig:\n type: object\n x-dcl-go-name: CryptoReplaceFfxFpeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig\n description: Ffx-Fpe\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - cryptoKey\n properties:\n commonAlphabet:\n type: string\n x-dcl-go-name: CommonAlphabet\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabetEnum\n description: 'Common alphabets. Possible\n values: FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED,\n NUMERIC, HEXADECIMAL, UPPER_CASE_ALPHA_NUMERIC,\n ALPHA_NUMERIC'\n x-dcl-conflicts:\n - customAlphabet\n - radix\n enum:\n - FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED\n - NUMERIC\n - HEXADECIMAL\n - UPPER_CASE_ALPHA_NUMERIC\n - ALPHA_NUMERIC\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext\n description: 'The ''tweak'', a context may\n be used for higher security since the\n same identifier in two different contexts\n won''t be given the same surrogate. If\n the context is not set, a default tweak\n will be used. If the context is set but:\n 1. there is no record present when transforming\n a given value or 1. the field is not present\n when transforming a given value, a default\n tweak will be used. Note that case (1)\n is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s. 
Currently, the referenced\n field may be of value type integer or\n string. The tweak is constructed as a\n sequence of bytes in big endian byte order\n such that: - a 64 bit integer is encoded\n followed by a single byte of value 1 -\n a string is encoded in UTF-8 format followed\n by a single byte of value 2'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey\n description: Required. The key used by the\n encryption algorithm.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. 
This is an arbitrary\n string used to differentiate different\n keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256\n bit key.\n customAlphabet:\n type: string\n x-dcl-go-name: CustomAlphabet\n description: 'This is supported by mapping\n these to the alphanumeric characters that\n the FFX mode natively supports. This happens\n before/after encryption/decryption. Each\n character listed must appear only once.\n Number of characters must be in the range\n [2, 95]. This must be encoded as ASCII.\n The order of characters does not matter.\n The full list of allowed characters is:\n ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n ~`!@#$%^&*()_-+={[}]|:;\"''<,>.?/``'\n x-dcl-conflicts:\n - commonAlphabet\n - radix\n radix:\n type: integer\n format: int64\n x-dcl-go-name: Radix\n description: The native way to select the\n alphabet. Must be in the range [2, 95].\n x-dcl-conflicts:\n - commonAlphabet\n - customAlphabet\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType\n description: 'The custom infoType to annotate\n the surrogate with. 
This annotation will\n be applied to the surrogate by prefixing\n it with the name of the custom infoType\n followed by the number of characters comprising\n the surrogate. The following scheme defines\n the format: info_type_name(surrogate_character_count):surrogate\n For example, if the name of custom infoType\n is ''MY_TOKEN_INFO_TYPE'' and the surrogate\n is ''abc'', the full replacement value\n will be: ''MY_TOKEN_INFO_TYPE(3):abc''\n This annotation identifies the surrogate\n when inspecting content using the custom\n infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\n This facilitates reversal of the surrogate\n when it occurs in free text. In order\n for inspection to work properly, the name\n of this infoType must not occur naturally\n anywhere in your data; otherwise, inspection\n may find a surrogate that does not correspond\n to an actual identifier. Therefore, choose\n your custom infoType name carefully after\n considering what your data looks like.\n One way to select a name that has a high\n chance of yielding reliable detection\n is to include one or more unicode characters\n that are highly improbable to exist in\n your data. For example, assuming your\n data is entered from a regular ASCII keyboard,\n the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information\n type. Either a name of your choosing\n when creating a CustomInfoType, or\n one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When\n sending Cloud DLP results to Data\n Catalog, infoType names should conform\n to the pattern `[A-Za-z0-9$-_]{1,64}`.\n dateShiftConfig:\n type: object\n x-dcl-go-name: DateShiftConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig\n description: Date Shift\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - cryptoDeterministicConfig\n required:\n - upperBoundDays\n - lowerBoundDays\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext\n description: Points to the field that contains\n the context, for example, an entity id.\n If set, must also set cryptoKey. If set,\n shift will be consistent for the given\n context.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey\n description: Causes the shift to be computed\n based on this key and the context. This\n results in the same shift for the same\n context and crypto_key. If set, must also\n set context. 
Can only be applied to table\n items.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. This is an arbitrary\n string used to differentiate different\n keys. 
A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256\n bit key.\n lowerBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: LowerBoundDays\n description: Required. For example, -5 means\n shift date to at most 5 days back in the\n past.\n upperBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: UpperBoundDays\n description: Required. Range of shift in\n days. Actual shift will be selected at\n random within this range (inclusive ends).\n Negative means shift to earlier in time.\n Must not be more than 365250 days (1000\n years) each direction. 
For example, 3\n means shift date to at most 3 days into\n the future.\n fixedSizeBucketingConfig:\n type: object\n x-dcl-go-name: FixedSizeBucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig\n description: Fixed size bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - lowerBound\n - upperBound\n - bucketSize\n properties:\n bucketSize:\n type: number\n format: double\n x-dcl-go-name: BucketSize\n description: 'Required. Size of each bucket\n (except for minimum and maximum buckets).\n So if `lower_bound` = 10, `upper_bound`\n = 89, and `bucket_size` = 10, then the\n following buckets would be used: -10,\n 10-20, 20-30, 30-40, 40-50, 50-60, 60-70,\n 70-80, 80-89, 89+. Precision up to 2 decimals\n works.'\n lowerBound:\n type: object\n x-dcl-go-name: LowerBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound\n description: Required. Lower bound value\n of buckets. 
All values less than `lower_bound`\n are grouped together into a single bucket;\n for example if `lower_bound` = 10, then\n all values less than 10 are replaced with\n the value \"-10\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n upperBound:\n type: object\n x-dcl-go-name: UpperBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound\n description: Required. Upper bound value\n of buckets. 
All values greater than upper_bound\n are grouped together into a single bucket;\n for example if `upper_bound` = 89, then\n all values greater than 89 are replaced\n with the value \"89+\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n redactConfig:\n type: object\n x-dcl-go-name: RedactConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig\n description: Redact\n x-dcl-conflicts:\n - replaceConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n replaceConfig:\n type: object\n x-dcl-go-name: ReplaceConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig\n description: Replace with a specified value.\n x-dcl-conflicts:\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n newValue:\n type: object\n x-dcl-go-name: NewValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue\n description: Value to replace it with.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replaceWithInfoTypeConfig:\n type: object\n x-dcl-go-name: ReplaceWithInfoTypeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig\n description: Replace with infotype\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n timePartConfig:\n type: object\n x-dcl-go-name: TimePartConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig\n description: Time extraction\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n partToExtract:\n type: string\n x-dcl-go-name: 
PartToExtract\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtractEnum\n description: 'The part of the time to keep.\n Possible values: TIME_PART_UNSPECIFIED,\n YEAR, MONTH, DAY_OF_MONTH, DAY_OF_WEEK,\n WEEK_OF_YEAR, HOUR_OF_DAY'\n enum:\n - TIME_PART_UNSPECIFIED\n - YEAR\n - MONTH\n - DAY_OF_MONTH\n - DAY_OF_WEEK\n - WEEK_OF_YEAR\n - HOUR_OF_DAY\n primitiveTransformation:\n type: object\n x-dcl-go-name: PrimitiveTransformation\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation\n description: Apply the transformation to the entire field.\n x-dcl-conflicts:\n - infoTypeTransformations\n properties:\n bucketingConfig:\n type: object\n x-dcl-go-name: BucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig\n description: Bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n buckets:\n type: array\n x-dcl-go-name: Buckets\n description: Set of buckets. 
Ranges must be non-overlapping.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets\n required:\n - replacementValue\n properties:\n max:\n type: object\n x-dcl-go-name: Max\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax\n description: Upper bound of the range, exclusive;\n type must match min.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. 
An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n min:\n type: object\n x-dcl-go-name: Min\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin\n description: Lower bound of the range, inclusive.\n Type should be the same as max if used.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue\n description: 
time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replacementValue:\n type: object\n x-dcl-go-name: ReplacementValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue\n description: Required. 
Replacement value for\n this bucket.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. 
An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n characterMaskConfig:\n type: object\n x-dcl-go-name: CharacterMaskConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig\n description: Mask\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n charactersToIgnore:\n type: array\n x-dcl-go-name: CharactersToIgnore\n description: When masking a string, items in this\n list will be skipped when replacing characters.\n For example, if the input string is `555-555-5555`\n and you instruct Cloud DLP to skip `-` and mask\n 5 characters with `*`, Cloud DLP returns `***-**5-5555`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore\n properties:\n charactersToSkip:\n type: string\n x-dcl-go-name: CharactersToSkip\n description: Characters to 
not transform when\n masking.\n x-dcl-conflicts:\n - commonCharactersToIgnore\n commonCharactersToIgnore:\n type: string\n x-dcl-go-name: CommonCharactersToIgnore\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnoreEnum\n description: 'Common characters to not transform\n when masking. Useful to avoid removing punctuation.\n Possible values: COMMON_CHARS_TO_IGNORE_UNSPECIFIED,\n NUMERIC, ALPHA_UPPER_CASE, ALPHA_LOWER_CASE,\n PUNCTUATION, WHITESPACE'\n x-dcl-conflicts:\n - charactersToSkip\n enum:\n - COMMON_CHARS_TO_IGNORE_UNSPECIFIED\n - NUMERIC\n - ALPHA_UPPER_CASE\n - ALPHA_LOWER_CASE\n - PUNCTUATION\n - WHITESPACE\n maskingCharacter:\n type: string\n x-dcl-go-name: MaskingCharacter\n description: Character to use to mask the sensitive\n values—for example, `*` for an alphabetic string\n such as a name, or `0` for a numeric string such\n as ZIP code or credit card number. This string must\n have a length of 1. If not supplied, this value\n defaults to `*` for strings, and `0` for digits.\n numberToMask:\n type: integer\n format: int64\n x-dcl-go-name: NumberToMask\n description: Number of characters to mask. If not\n set, all matching chars will be masked. Skipped\n characters do not count towards this tally.\n reverseOrder:\n type: boolean\n x-dcl-go-name: ReverseOrder\n description: Mask characters in reverse order. For\n example, if `masking_character` is `0`, `number_to_mask`\n is `14`, and `reverse_order` is `false`, then the\n input string `1234-5678-9012-3456` is masked as\n `00000000000000-3456`. 
If `masking_character` is\n `*`, `number_to_mask` is `3`, and `reverse_order`\n is `true`, then the string `12345` is masked as\n `12***`.\n cryptoDeterministicConfig:\n type: object\n x-dcl-go-name: CryptoDeterministicConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig\n description: Deterministic Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext\n description: 'A context may be used for higher security\n and maintaining referential integrity such that\n the same identifier in two different contexts will\n be given a distinct surrogate. The context is appended\n to plaintext value being encrypted. On decryption\n the provided context is validated against the value\n used during encryption. If a context was provided\n during encryption, same context must be provided\n during decryption as well. If the context is not\n set, plaintext would be used as is for encryption.\n If the context is set but: 1. there is no record\n present when transforming a given value or 2. the\n field is not present when transforming a given value,\n plaintext would be used as is for encryption. 
Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey\n description: The key used by the encryption function.\n For deterministic encryption using AES-SIV, the\n provided key is internally expanded to 64 bytes\n prior to use.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType\n description: 'The custom info type to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom info type followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format: {info type name}({surrogate character\n count}):{surrogate} For example, if the name of\n custom info type is ''MY_TOKEN_INFO_TYPE'' and the\n surrogate is ''abc'', the full replacement value\n will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This annotation\n identifies the surrogate when inspecting content\n using the custom info type ''Surrogate''. This facilitates\n reversal of the surrogate when it occurs in free\n text. Note: For record transformations where the\n entire cell in a table is being transformed, surrogates\n are not mandatory. Surrogates are used to denote\n the location of the token and are necessary for\n re-identification in free form text. 
In order for\n inspection to work properly, the name of this info\n type must not occur naturally anywhere in your data;\n otherwise, inspection may either - reverse a surrogate\n that does not correspond to an actual identifier\n - be unable to parse the surrogate and result in\n an error Therefore, choose your custom info type\n name carefully after considering what your data\n looks like. One way to select a name that has a\n high chance of yielding reliable detection is to\n include one or more unicode characters that are\n highly improbable to exist in your data. For example,\n assuming your data is entered from a regular ASCII\n keyboard, the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n cryptoHashConfig:\n type: object\n x-dcl-go-name: CryptoHashConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig\n description: Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey\n description: The key used by the hash function.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. 
This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n cryptoReplaceFfxFpeConfig:\n type: object\n x-dcl-go-name: CryptoReplaceFfxFpeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig\n description: Ffx-Fpe\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - cryptoKey\n properties:\n commonAlphabet:\n type: string\n x-dcl-go-name: CommonAlphabet\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabetEnum\n description: 'Common alphabets. 
Possible values: FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED,\n NUMERIC, HEXADECIMAL, UPPER_CASE_ALPHA_NUMERIC,\n ALPHA_NUMERIC'\n x-dcl-conflicts:\n - customAlphabet\n - radix\n enum:\n - FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED\n - NUMERIC\n - HEXADECIMAL\n - UPPER_CASE_ALPHA_NUMERIC\n - ALPHA_NUMERIC\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext\n description: 'The ''tweak'', a context may be used\n for higher security since the same identifier in\n two different contexts won''t be given the same\n surrogate. If the context is not set, a default\n tweak will be used. If the context is set but: 1.\n there is no record present when transforming a given\n value or 1. the field is not present when transforming\n a given value, a default tweak will be used. Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s. Currently, the referenced field\n may be of value type integer or string. The tweak\n is constructed as a sequence of bytes in big endian\n byte order such that: - a 64 bit integer is encoded\n followed by a single byte of value 1 - a string\n is encoded in UTF-8 format followed by a single\n byte of value 2'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey\n description: Required. 
The key used by the encryption\n algorithm.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. 
A 128/192/256 bit key.\n customAlphabet:\n type: string\n x-dcl-go-name: CustomAlphabet\n description: 'This is supported by mapping these to\n the alphanumeric characters that the FFX mode natively\n supports. This happens before/after encryption/decryption.\n Each character listed must appear only once. Number\n of characters must be in the range [2, 95]. This\n must be encoded as ASCII. The order of characters\n does not matter. The full list of allowed characters\n is: ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n ~`!@#$%^&*()_-+={[}]|:;\"''<,>.?/``'\n x-dcl-conflicts:\n - commonAlphabet\n - radix\n radix:\n type: integer\n format: int64\n x-dcl-go-name: Radix\n description: The native way to select the alphabet.\n Must be in the range [2, 95].\n x-dcl-conflicts:\n - commonAlphabet\n - customAlphabet\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType\n description: 'The custom infoType to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom infoType followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format: info_type_name(surrogate_character_count):surrogate\n For example, if the name of custom infoType is ''MY_TOKEN_INFO_TYPE''\n and the surrogate is ''abc'', the full replacement\n value will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This\n annotation identifies the surrogate when inspecting\n content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\n This facilitates reversal of the surrogate when\n it occurs in free text. 
In order for inspection\n to work properly, the name of this infoType must\n not occur naturally anywhere in your data; otherwise,\n inspection may find a surrogate that does not correspond\n to an actual identifier. Therefore, choose your\n custom infoType name carefully after considering\n what your data looks like. One way to select a name\n that has a high chance of yielding reliable detection\n is to include one or more unicode characters that\n are highly improbable to exist in your data. For\n example, assuming your data is entered from a regular\n ASCII keyboard, the symbol with the hex code point\n 29DD might be used like so: ⧝MY_TOKEN_TYPE'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n dateShiftConfig:\n type: object\n x-dcl-go-name: DateShiftConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig\n description: Date Shift\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - cryptoDeterministicConfig\n required:\n - upperBoundDays\n - lowerBoundDays\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext\n description: Points to the field that contains the\n context, for example, an entity id. If set, must\n also set cryptoKey. 
If set, shift will be consistent\n for the given context.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey\n description: Causes the shift to be computed based\n on this key and the context. This results in the\n same shift for the same context and crypto_key.\n If set, must also set context. Can only be applied\n to table items.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n lowerBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: LowerBoundDays\n description: Required. For example, -5 means shift\n date to at most 5 days back in the past.\n upperBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: UpperBoundDays\n description: Required. Range of shift in days. Actual\n shift will be selected at random within this range\n (inclusive ends). Negative means shift to earlier\n in time. Must not be more than 365250 days (1000\n years) each direction. For example, 3 means shift\n date to at most 3 days into the future.\n fixedSizeBucketingConfig:\n type: object\n x-dcl-go-name: FixedSizeBucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig\n description: Fixed size bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - lowerBound\n - upperBound\n - bucketSize\n properties:\n bucketSize:\n type: number\n format: double\n x-dcl-go-name: BucketSize\n description: 'Required. Size of each bucket (except\n for minimum and maximum buckets). 
So if `lower_bound`\n = 10, `upper_bound` = 89, and `bucket_size` = 10,\n then the following buckets would be used: -10, 10-20,\n 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89,\n 89+. Precision up to 2 decimals works.'\n lowerBound:\n type: object\n x-dcl-go-name: LowerBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound\n description: Required. Lower bound value of buckets.\n All values less than `lower_bound` are grouped together\n into a single bucket; for example if `lower_bound`\n = 10, then all values less than 10 are replaced\n with the value \"-10\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n upperBound:\n type: object\n x-dcl-go-name: UpperBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound\n description: Required. Upper bound value of buckets.\n All values greater than upper_bound are grouped\n together into a single bucket; for example if `upper_bound`\n = 89, then all values greater than 89 are replaced\n with the value \"89+\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue\n 
description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n redactConfig:\n type: object\n x-dcl-go-name: RedactConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig\n description: Redact\n x-dcl-conflicts:\n - replaceConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n replaceConfig:\n type: object\n x-dcl-go-name: ReplaceConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig\n description: Replace with a specified value.\n x-dcl-conflicts:\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - 
bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n newValue:\n type: object\n x-dcl-go-name: NewValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue\n description: Value to replace it with.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replaceWithInfoTypeConfig:\n type: object\n x-dcl-go-name: ReplaceWithInfoTypeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig\n description: Replace with infotype\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n timePartConfig:\n type: object\n x-dcl-go-name: TimePartConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig\n description: Time extraction\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n partToExtract:\n type: string\n x-dcl-go-name: PartToExtract\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtractEnum\n description: 'The part of the time to keep. Possible\n values: TIME_PART_UNSPECIFIED, YEAR, MONTH, DAY_OF_MONTH,\n DAY_OF_WEEK, WEEK_OF_YEAR, HOUR_OF_DAY'\n enum:\n - TIME_PART_UNSPECIFIED\n - YEAR\n - MONTH\n - DAY_OF_MONTH\n - DAY_OF_WEEK\n - WEEK_OF_YEAR\n - HOUR_OF_DAY\n recordSuppressions:\n type: array\n x-dcl-go-name: RecordSuppressions\n description: Configuration defining which records get suppressed\n entirely. Records that match any suppression rule are omitted\n from the output.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions\n properties:\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition\n description: A condition that when it evaluates to true will\n result in the record being evaluated to be suppressed from\n the transformed content.\n properties:\n expressions:\n type: object\n x-dcl-go-name: Expressions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions\n description: An expression.\n properties:\n conditions:\n type: object\n x-dcl-go-name: Conditions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions\n description: Conditions to apply to the expression.\n properties:\n conditions:\n type: array\n x-dcl-go-name: Conditions\n description: A collection of conditions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions\n required:\n - field\n - operator\n properties:\n field:\n type: object\n x-dcl-go-name: Field\n 
x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField\n description: Required. Field within the\n record this condition is evaluated against.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n operator:\n type: string\n x-dcl-go-name: Operator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperatorEnum\n description: 'Required. Operator used to\n compare the field or infoType to the value.\n Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n value:\n type: object\n x-dcl-go-name: Value\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue\n description: Value to compare against. [Mandatory,\n except for `EXISTS` tests.]\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. 
Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 
24\n hour format. Should be from 0\n to 23. An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n logicalOperator:\n type: string\n x-dcl-go-name: LogicalOperator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperatorEnum\n description: 'The operator to apply to the result\n of conditions. Default and currently only supported\n value is `AND`. Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n transformationErrorHandling:\n type: object\n x-dcl-go-name: TransformationErrorHandling\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigTransformationErrorHandling\n description: Mode for handling transformation errors. 
If left unspecified,\n the default mode is `TransformationErrorHandling.ThrowError`.\n properties:\n leaveUntransformed:\n type: object\n x-dcl-go-name: LeaveUntransformed\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigTransformationErrorHandlingLeaveUntransformed\n description: Ignore errors\n x-dcl-conflicts:\n - throwError\n throwError:\n type: object\n x-dcl-go-name: ThrowError\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigTransformationErrorHandlingThrowError\n description: Throw an error\n x-dcl-conflicts:\n - leaveUntransformed\n description:\n type: string\n x-dcl-go-name: Description\n description: Short description (max 256 chars).\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Display name (max 256 chars).\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of the resource\n x-kubernetes-immutable: true\n locationId:\n type: string\n x-dcl-go-name: LocationId\n readOnly: true\n description: Output only. The geographic location where this resource is\n stored.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The template name. The template will have one\n of the following formats: `projects/PROJECT_ID/deidentifyTemplates/TEMPLATE_ID`\n OR `organizations/ORGANIZATION_ID/deidentifyTemplates/TEMPLATE_ID`'\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n parent:\n type: string\n x-dcl-go-name: Parent\n description: The parent of the resource\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The last update timestamp of an inspectTemplate.\n x-kubernetes-immutable: true\n") +var YAML_deidentify_template = []byte("info:\n title: Dlp/DeidentifyTemplate\n description: The Dlp DeidentifyTemplate resource\n x-dcl-struct-name: DeidentifyTemplate\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a DeidentifyTemplate\n parameters:\n - name: DeidentifyTemplate\n required: true\n description: A full instance of a DeidentifyTemplate\n apply:\n description: The function used to apply information about a DeidentifyTemplate\n parameters:\n - name: DeidentifyTemplate\n required: true\n description: A full instance of a DeidentifyTemplate\n delete:\n description: The function used to delete a DeidentifyTemplate\n parameters:\n - name: DeidentifyTemplate\n required: true\n description: A full instance of a DeidentifyTemplate\n deleteAll:\n description: The function used to delete all DeidentifyTemplate\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many DeidentifyTemplate\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n DeidentifyTemplate:\n title: DeidentifyTemplate\n x-dcl-id: '{{parent}}/deidentifyTemplates/{{name}}'\n x-dcl-locations:\n - region\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - parent\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The creation timestamp of an inspectTemplate.\n x-kubernetes-immutable: true\n deidentifyConfig:\n type: object\n x-dcl-go-name: DeidentifyConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfig\n description: The core content of the template.\n properties:\n infoTypeTransformations:\n type: object\n x-dcl-go-name: InfoTypeTransformations\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformations\n description: Treat the dataset as free-form text and apply the same\n free text transformation everywhere.\n x-dcl-conflicts:\n - recordTransformations\n required:\n - transformations\n properties:\n transformations:\n type: array\n x-dcl-go-name: Transformations\n description: Required. Transformation for each infoType. Cannot\n specify more than one for a given infoType.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations\n required:\n - primitiveTransformation\n properties:\n infoTypes:\n type: array\n x-dcl-go-name: InfoTypes\n description: InfoTypes to apply the transformation to. An\n empty list will cause this transformation to apply to all\n findings that correspond to infoTypes that were requested\n in `InspectConfig`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either a\n name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending Cloud\n DLP results to Data Catalog, infoType names should\n conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n primitiveTransformation:\n type: object\n x-dcl-go-name: PrimitiveTransformation\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation\n description: Required. Primitive transformation to apply to\n the infoType.\n properties:\n bucketingConfig:\n type: object\n x-dcl-go-name: BucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig\n description: Bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n buckets:\n type: array\n x-dcl-go-name: Buckets\n description: Set of buckets. Ranges must be non-overlapping.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets\n required:\n - replacementValue\n properties:\n max:\n type: object\n x-dcl-go-name: Max\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax\n description: Upper bound of the range, exclusive;\n type must match min.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue\n description: date\n x-dcl-conflicts:\n 
- integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n 
timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. 
Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n min:\n type: object\n x-dcl-go-name: Min\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin\n description: Lower bound of the range, inclusive.\n Type should be the same as max if used.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. 
An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replacementValue:\n type: object\n x-dcl-go-name: ReplacementValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue\n description: Required. Replacement value for\n this bucket.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n characterMaskConfig:\n type: object\n x-dcl-go-name: CharacterMaskConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig\n description: Mask\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n charactersToIgnore:\n type: array\n x-dcl-go-name: CharactersToIgnore\n description: When masking a string, items in this\n list will be skipped when replacing characters.\n For example, if the input string is 
`555-555-5555`\n and you instruct Cloud DLP to skip `-` and mask\n 5 characters with `*`, Cloud DLP returns `***-**5-5555`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore\n properties:\n charactersToSkip:\n type: string\n x-dcl-go-name: CharactersToSkip\n description: Characters to not transform when\n masking.\n x-dcl-conflicts:\n - commonCharactersToIgnore\n commonCharactersToIgnore:\n type: string\n x-dcl-go-name: CommonCharactersToIgnore\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnoreEnum\n description: 'Common characters to not transform\n when masking. Useful to avoid removing punctuation.\n Possible values: COMMON_CHARS_TO_IGNORE_UNSPECIFIED,\n NUMERIC, ALPHA_UPPER_CASE, ALPHA_LOWER_CASE,\n PUNCTUATION, WHITESPACE'\n x-dcl-conflicts:\n - charactersToSkip\n enum:\n - COMMON_CHARS_TO_IGNORE_UNSPECIFIED\n - NUMERIC\n - ALPHA_UPPER_CASE\n - ALPHA_LOWER_CASE\n - PUNCTUATION\n - WHITESPACE\n maskingCharacter:\n type: string\n x-dcl-go-name: MaskingCharacter\n description: Character to use to mask the sensitive\n values—for example, `*` for an alphabetic string\n such as a name, or `0` for a numeric string such\n as ZIP code or credit card number. This string must\n have a length of 1. If not supplied, this value\n defaults to `*` for strings, and `0` for digits.\n numberToMask:\n type: integer\n format: int64\n x-dcl-go-name: NumberToMask\n description: Number of characters to mask. If not\n set, all matching chars will be masked. Skipped\n characters do not count towards this tally.\n reverseOrder:\n type: boolean\n x-dcl-go-name: ReverseOrder\n description: Mask characters in reverse order. 
For\n example, if `masking_character` is `0`, `number_to_mask`\n is `14`, and `reverse_order` is `false`, then the\n input string `1234-5678-9012-3456` is masked as\n `00000000000000-3456`. If `masking_character` is\n `*`, `number_to_mask` is `3`, and `reverse_order`\n is `true`, then the string `12345` is masked as\n `12***`.\n cryptoDeterministicConfig:\n type: object\n x-dcl-go-name: CryptoDeterministicConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig\n description: Deterministic Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext\n description: 'A context may be used for higher security\n and maintaining referential integrity such that\n the same identifier in two different contexts will\n be given a distinct surrogate. The context is appended\n to plaintext value being encrypted. On decryption\n the provided context is validated against the value\n used during encryption. If a context was provided\n during encryption, same context must be provided\n during decryption as well. If the context is not\n set, plaintext would be used as is for encryption.\n If the context is set but: 1. there is no record\n present when transforming a given value or 2. the\n field is not present when transforming a given value,\n plaintext would be used as is for encryption. 
Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey\n description: The key used by the encryption function.\n For deterministic encryption using AES-SIV, the\n provided key is internally expanded to 64 bytes\n prior to use.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType\n description: 'The custom info type to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom info type followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format {info type name}({surrogate character\n count}):{surrogate} For example, if the name of\n custom info type is ''MY_TOKEN_INFO_TYPE'' and the\n surrogate is ''abc'', the full replacement value\n will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This annotation\n identifies the surrogate when inspecting content\n using the custom info type ''Surrogate''. This facilitates\n reversal of the surrogate when it occurs in free\n text. Note: For record transformations where the\n entire cell in a table is being transformed, surrogates\n are not mandatory. Surrogates are used to denote\n the location of the token and are necessary for\n re-identification in free form text. 
In order for\n inspection to work properly, the name of this info\n type must not occur naturally anywhere in your data;\n otherwise, inspection may either - reverse a surrogate\n that does not correspond to an actual identifier\n - be unable to parse the surrogate and result in\n an error Therefore, choose your custom info type\n name carefully after considering what your data\n looks like. One way to select a name that has a\n high chance of yielding reliable detection is to\n include one or more unicode characters that are\n highly improbable to exist in your data. For example,\n assuming your data is entered from a regular ASCII\n keyboard, the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n cryptoHashConfig:\n type: object\n x-dcl-go-name: CryptoHashConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig\n description: Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey\n description: The key used by the hash function.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. 
This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n cryptoReplaceFfxFpeConfig:\n type: object\n x-dcl-go-name: CryptoReplaceFfxFpeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig\n description: Ffx-Fpe\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - cryptoKey\n properties:\n commonAlphabet:\n type: string\n x-dcl-go-name: CommonAlphabet\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabetEnum\n description: 'Common alphabets. 
Possible values: FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED,\n NUMERIC, HEXADECIMAL, UPPER_CASE_ALPHA_NUMERIC,\n ALPHA_NUMERIC'\n x-dcl-conflicts:\n - customAlphabet\n - radix\n enum:\n - FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED\n - NUMERIC\n - HEXADECIMAL\n - UPPER_CASE_ALPHA_NUMERIC\n - ALPHA_NUMERIC\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext\n description: 'The ''tweak'', a context may be used\n for higher security since the same identifier in\n two different contexts won''t be given the same\n surrogate. If the context is not set, a default\n tweak will be used. If the context is set but: 1.\n there is no record present when transforming a given\n value or 1. the field is not present when transforming\n a given value, a default tweak will be used. Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s. Currently, the referenced field\n may be of value type integer or string. The tweak\n is constructed as a sequence of bytes in big endian\n byte order such that: - a 64 bit integer is encoded\n followed by a single byte of value 1 - a string\n is encoded in UTF-8 format followed by a single\n byte of value 2'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey\n description: Required. 
The key used by the encryption\n algorithm.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. 
A 128/192/256 bit key.\n customAlphabet:\n type: string\n x-dcl-go-name: CustomAlphabet\n description: 'This is supported by mapping these to\n the alphanumeric characters that the FFX mode natively\n supports. This happens before/after encryption/decryption.\n Each character listed must appear only once. Number\n of characters must be in the range [2, 95]. This\n must be encoded as ASCII. The order of characters\n does not matter. The full list of allowed characters\n is: ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n ~`!@#$%^&*()_-+={[}]|:;\"''<,>.?/``'\n x-dcl-conflicts:\n - commonAlphabet\n - radix\n radix:\n type: integer\n format: int64\n x-dcl-go-name: Radix\n description: The native way to select the alphabet.\n Must be in the range [2, 95].\n x-dcl-conflicts:\n - commonAlphabet\n - customAlphabet\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType\n description: 'The custom infoType to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom infoType followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format: info_type_name(surrogate_character_count):surrogate\n For example, if the name of custom infoType is ''MY_TOKEN_INFO_TYPE''\n and the surrogate is ''abc'', the full replacement\n value will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This\n annotation identifies the surrogate when inspecting\n content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\n This facilitates reversal of the surrogate when\n it occurs in free text. 
In order for inspection\n to work properly, the name of this infoType must\n not occur naturally anywhere in your data; otherwise,\n inspection may find a surrogate that does not correspond\n to an actual identifier. Therefore, choose your\n custom infoType name carefully after considering\n what your data looks like. One way to select a name\n that has a high chance of yielding reliable detection\n is to include one or more unicode characters that\n are highly improbable to exist in your data. For\n example, assuming your data is entered from a regular\n ASCII keyboard, the symbol with the hex code point\n 29DD might be used like so: ⧝MY_TOKEN_TYPE'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n dateShiftConfig:\n type: object\n x-dcl-go-name: DateShiftConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig\n description: Date Shift\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - cryptoDeterministicConfig\n required:\n - upperBoundDays\n - lowerBoundDays\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext\n description: Points to the field that contains the\n context, for example, an entity id. If set, must\n also set cryptoKey. 
If set, shift will be consistent\n for the given context.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey\n description: Causes the shift to be computed based\n on this key and the context. This results in the\n same shift for the same context and crypto_key.\n If set, must also set context. Can only be applied\n to table items.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n lowerBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: LowerBoundDays\n description: Required. For example, -5 means shift\n date to at most 5 days back in the past.\n upperBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: UpperBoundDays\n description: Required. Range of shift in days. Actual\n shift will be selected at random within this range\n (inclusive ends). Negative means shift to earlier\n in time. Must not be more than 365250 days (1000\n years) each direction. For example, 3 means shift\n date to at most 3 days into the future.\n fixedSizeBucketingConfig:\n type: object\n x-dcl-go-name: FixedSizeBucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig\n description: Fixed size bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - lowerBound\n - upperBound\n - bucketSize\n properties:\n bucketSize:\n type: number\n format: double\n x-dcl-go-name: BucketSize\n description: 'Required. Size of each bucket (except\n for minimum and maximum buckets). 
So if `lower_bound`\n = 10, `upper_bound` = 89, and `bucket_size` = 10,\n then the following buckets would be used: -10, 10-20,\n 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89,\n 89+. Precision up to 2 decimals works.'\n lowerBound:\n type: object\n x-dcl-go-name: LowerBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound\n description: Required. Lower bound value of buckets.\n All values less than `lower_bound` are grouped together\n into a single bucket; for example if `lower_bound`\n = 10, then all values less than 10 are replaced\n with the value \"-10\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n upperBound:\n type: object\n x-dcl-go-name: UpperBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound\n description: Required. Upper bound value of buckets.\n All values greater than upper_bound are grouped\n together into a single bucket; for example if `upper_bound`\n = 89, then all values greater than 89 are replaced\n with the value \"89+\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue\n 
description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n redactConfig:\n type: object\n x-dcl-go-name: RedactConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig\n description: Redact\n x-dcl-conflicts:\n - replaceConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n replaceConfig:\n type: object\n x-dcl-go-name: ReplaceConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig\n description: Replace with a specified value.\n x-dcl-conflicts:\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - 
bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n newValue:\n type: object\n x-dcl-go-name: NewValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue\n description: Value to replace it with.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replaceWithInfoTypeConfig:\n type: object\n x-dcl-go-name: ReplaceWithInfoTypeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig\n description: Replace with infotype\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n timePartConfig:\n type: object\n x-dcl-go-name: TimePartConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig\n description: Time extraction\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n partToExtract:\n type: string\n x-dcl-go-name: PartToExtract\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtractEnum\n description: 'The part of the time to keep. Possible\n values: TIME_PART_UNSPECIFIED, YEAR, MONTH, DAY_OF_MONTH,\n DAY_OF_WEEK, WEEK_OF_YEAR, HOUR_OF_DAY'\n enum:\n - TIME_PART_UNSPECIFIED\n - YEAR\n - MONTH\n - DAY_OF_MONTH\n - DAY_OF_WEEK\n - WEEK_OF_YEAR\n - HOUR_OF_DAY\n recordTransformations:\n type: object\n x-dcl-go-name: RecordTransformations\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformations\n description: Treat the dataset as structured. Transformations can be\n applied to specific locations within structured datasets, such as\n transforming a column within a table.\n x-dcl-conflicts:\n - infoTypeTransformations\n properties:\n fieldTransformations:\n type: array\n x-dcl-go-name: FieldTransformations\n description: Transform the record by applying various field transformations.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations\n required:\n - fields\n properties:\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition\n description: 'Only apply the transformation if the condition\n evaluates to true for the given `RecordCondition`. The conditions\n are allowed to reference fields that are not used in the\n actual transformation. Example Use Cases: - Apply a different\n bucket transformation to an age column if the zip code column\n for the same record is within a specific range. 
- Redact\n a field if the date of birth field is greater than 85.'\n properties:\n expressions:\n type: object\n x-dcl-go-name: Expressions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions\n description: An expression.\n properties:\n conditions:\n type: object\n x-dcl-go-name: Conditions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions\n description: Conditions to apply to the expression.\n properties:\n conditions:\n type: array\n x-dcl-go-name: Conditions\n description: A collection of conditions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions\n required:\n - field\n - operator\n properties:\n field:\n type: object\n x-dcl-go-name: Field\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField\n description: Required. Field within the\n record this condition is evaluated against.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n operator:\n type: string\n x-dcl-go-name: Operator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperatorEnum\n description: 'Required. Operator used to\n compare the field or infoType to the value.\n Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n value:\n type: object\n x-dcl-go-name: Value\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue\n description: Value to compare against. 
[Mandatory,\n except for `EXISTS` tests.]\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n logicalOperator:\n type: string\n x-dcl-go-name: LogicalOperator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperatorEnum\n description: 'The operator to apply to the result\n of conditions. Default and currently only supported\n value is `AND`. Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n fields:\n type: array\n x-dcl-go-name: Fields\n description: Required. Input field(s) to apply the transformation\n to. When you have columns that reference their position\n within a list, omit the index from the FieldId. FieldId\n name matching ignores the index. 
For example, instead of\n \"contact.nums[0].type\", use \"contact.nums.type\".\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n infoTypeTransformations:\n type: object\n x-dcl-go-name: InfoTypeTransformations\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformations\n description: Treat the contents of the field as free text,\n and selectively transform content that matches an `InfoType`.\n x-dcl-conflicts:\n - primitiveTransformation\n required:\n - transformations\n properties:\n transformations:\n type: array\n x-dcl-go-name: Transformations\n description: Required. Transformation for each infoType.\n Cannot specify more than one for a given infoType.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformations\n required:\n - primitiveTransformation\n properties:\n infoTypes:\n type: array\n x-dcl-go-name: InfoTypes\n description: InfoTypes to apply the transformation\n to. An empty list will cause this transformation\n to apply to all findings that correspond to infoTypes\n that were requested in `InspectConfig`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypes\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type.\n Either a name of your choosing when creating\n a CustomInfoType, or one of the names listed\n at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n primitiveTransformation:\n type: object\n x-dcl-go-name: PrimitiveTransformation\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformation\n description: Required. Primitive transformation\n to apply to the infoType.\n properties:\n bucketingConfig:\n type: object\n x-dcl-go-name: BucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig\n description: Bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n buckets:\n type: array\n x-dcl-go-name: Buckets\n description: Set of buckets. 
Ranges must\n be non-overlapping.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets\n required:\n - replacementValue\n properties:\n max:\n type: object\n x-dcl-go-name: Max\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax\n description: Upper bound of the range,\n exclusive; type must match min.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month.\n Must be from 1 to 31 and\n valid for the year and month,\n or 0 to specify a year by\n itself or a year and month\n where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year.\n Must be from 1 to 12, or\n 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date.\n Must be from 1 to 9999,\n or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValueEnum\n description: 'day of week Possible\n values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY,\n THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day\n in 24 hour format. Should\n be from 0 to 23. 
An API\n may choose to allow the\n value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour\n of day. Must be from 0 to\n 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of\n seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally\n be from 0 to 59. An API\n may allow the value 60 if\n it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n min:\n type: object\n x-dcl-go-name: Min\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin\n description: Lower bound of the range,\n inclusive. 
Type should be the same\n as max if used.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month.\n Must be from 1 to 31 and\n valid for the year and month,\n or 0 to specify a year by\n itself or a year and month\n where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year.\n Must be from 1 to 12, or\n 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date.\n Must be from 1 to 9999,\n or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValueEnum\n description: 'day of week Possible\n values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY,\n THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n 
description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day\n in 24 hour format. Should\n be from 0 to 23. An API\n may choose to allow the\n value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour\n of day. Must be from 0 to\n 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of\n seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally\n be from 0 to 59. 
An API\n may allow the value 60 if\n it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replacementValue:\n type: object\n x-dcl-go-name: ReplacementValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue\n description: Required. Replacement\n value for this bucket.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month.\n Must be from 1 to 31 and\n valid for the year and month,\n or 0 to specify a year by\n itself or a year and month\n where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year.\n Must be from 1 to 12, or\n 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date.\n Must be from 1 to 9999,\n or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValueEnum\n description: 'day of week Possible\n values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY,\n THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day\n in 24 hour format. Should\n be from 0 to 23. 
An API\n may choose to allow the\n value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour\n of day. Must be from 0 to\n 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of\n seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally\n be from 0 to 59. An API\n may allow the value 60 if\n it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n characterMaskConfig:\n type: object\n x-dcl-go-name: CharacterMaskConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig\n description: Mask\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n charactersToIgnore:\n type: array\n x-dcl-go-name: CharactersToIgnore\n description: When masking a string, items\n in this list will be skipped when replacing\n characters. 
For example, if the input\n string is `555-555-5555` and you instruct\n Cloud DLP to skip `-` and mask 5 characters\n with `*`, Cloud DLP returns `***-**5-5555`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore\n properties:\n charactersToSkip:\n type: string\n x-dcl-go-name: CharactersToSkip\n description: Characters to not transform\n when masking.\n x-dcl-conflicts:\n - commonCharactersToIgnore\n commonCharactersToIgnore:\n type: string\n x-dcl-go-name: CommonCharactersToIgnore\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnoreEnum\n description: 'Common characters to\n not transform when masking. Useful\n to avoid removing punctuation. Possible\n values: COMMON_CHARS_TO_IGNORE_UNSPECIFIED,\n NUMERIC, ALPHA_UPPER_CASE, ALPHA_LOWER_CASE,\n PUNCTUATION, WHITESPACE'\n x-dcl-conflicts:\n - charactersToSkip\n enum:\n - COMMON_CHARS_TO_IGNORE_UNSPECIFIED\n - NUMERIC\n - ALPHA_UPPER_CASE\n - ALPHA_LOWER_CASE\n - PUNCTUATION\n - WHITESPACE\n maskingCharacter:\n type: string\n x-dcl-go-name: MaskingCharacter\n description: Character to use to mask the\n sensitive values—for example, `*` for\n an alphabetic string such as a name, or\n `0` for a numeric string such as ZIP code\n or credit card number. This string must\n have a length of 1. If not supplied, this\n value defaults to `*` for strings, and\n `0` for digits.\n numberToMask:\n type: integer\n format: int64\n x-dcl-go-name: NumberToMask\n description: Number of characters to mask.\n If not set, all matching chars will be\n masked. 
Skipped characters do not count\n towards this tally.\n reverseOrder:\n type: boolean\n x-dcl-go-name: ReverseOrder\n description: Mask characters in reverse\n order. For example, if `masking_character`\n is `0`, `number_to_mask` is `14`, and\n `reverse_order` is `false`, then the input\n string `1234-5678-9012-3456` is masked\n as `00000000000000-3456`. If `masking_character`\n is `*`, `number_to_mask` is `3`, and `reverse_order`\n is `true`, then the string `12345` is\n masked as `12***`.\n cryptoDeterministicConfig:\n type: object\n x-dcl-go-name: CryptoDeterministicConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig\n description: Deterministic Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext\n description: 'A context may be used for\n higher security and maintaining referential\n integrity such that the same identifier\n in two different contexts will be given\n a distinct surrogate. The context is appended\n to plaintext value being encrypted. On\n decryption the provided context is validated\n against the value used during encryption.\n If a context was provided during encryption,\n same context must be provided during decryption\n as well. If the context is not set, plaintext\n would be used as is for encryption. If\n the context is set but: 1. there is no\n record present when transforming a given\n value or 2. 
the field is not present when\n transforming a given value, plaintext\n would be used as is for encryption. Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey\n description: The key used by the encryption\n function. For deterministic encryption\n using AES-SIV, the provided key is internally\n expanded to 64 bytes prior to use.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. 
This is an arbitrary\n string used to differentiate different\n keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256\n bit key.\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType\n description: 'The custom info type to annotate\n the surrogate with. This annotation will\n be applied to the surrogate by prefixing\n it with the name of the custom info type\n followed by the number of characters comprising\n the surrogate. The following scheme defines\n the format {info type name}({surrogate\n character count}):{surrogate} For example,\n if the name of custom info type is ''MY_TOKEN_INFO_TYPE''\n and the surrogate is ''abc'', the full\n replacement value will be: ''MY_TOKEN_INFO_TYPE(3):abc''\n This annotation identifies the surrogate\n when inspecting content using the custom\n info type ''Surrogate''. This facilitates\n reversal of the surrogate when it occurs\n in free text. 
Note: For record transformations\n where the entire cell in a table is being\n transformed, surrogates are not mandatory.\n Surrogates are used to denote the location\n of the token and are necessary for re-identification\n in free form text. In order for inspection\n to work properly, the name of this info\n type must not occur naturally anywhere\n in your data; otherwise, inspection may\n either - reverse a surrogate that does\n not correspond to an actual identifier\n - be unable to parse the surrogate and\n result in an error Therefore, choose your\n custom info type name carefully after\n considering what your data looks like.\n One way to select a name that has a high\n chance of yielding reliable detection\n is to include one or more unicode characters\n that are highly improbable to exist in\n your data. For example, assuming your\n data is entered from a regular ASCII keyboard,\n the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information\n type. Either a name of your choosing\n when creating a CustomInfoType, or\n one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When\n sending Cloud DLP results to Data\n Catalog, infoType names should conform\n to the pattern `[A-Za-z0-9$-_]{1,64}`.\n cryptoHashConfig:\n type: object\n x-dcl-go-name: CryptoHashConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig\n description: Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey\n description: The key used by the hash function.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. 
The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. This is an arbitrary\n string used to differentiate different\n keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. 
A 128/192/256\n bit key.\n cryptoReplaceFfxFpeConfig:\n type: object\n x-dcl-go-name: CryptoReplaceFfxFpeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig\n description: Ffx-Fpe\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - cryptoKey\n properties:\n commonAlphabet:\n type: string\n x-dcl-go-name: CommonAlphabet\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabetEnum\n description: 'Common alphabets. Possible\n values: FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED,\n NUMERIC, HEXADECIMAL, UPPER_CASE_ALPHA_NUMERIC,\n ALPHA_NUMERIC'\n x-dcl-conflicts:\n - customAlphabet\n - radix\n enum:\n - FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED\n - NUMERIC\n - HEXADECIMAL\n - UPPER_CASE_ALPHA_NUMERIC\n - ALPHA_NUMERIC\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext\n description: 'The ''tweak'', a context may\n be used for higher security since the\n same identifier in two different contexts\n won''t be given the same surrogate. If\n the context is not set, a default tweak\n will be used. If the context is set but:\n 1. there is no record present when transforming\n a given value or 1. the field is not present\n when transforming a given value, a default\n tweak will be used. Note that case (1)\n is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s. 
Currently, the referenced\n field may be of value type integer or\n string. The tweak is constructed as a\n sequence of bytes in big endian byte order\n such that: - a 64 bit integer is encoded\n followed by a single byte of value 1 -\n a string is encoded in UTF-8 format followed\n by a single byte of value 2'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey\n description: Required. The key used by the\n encryption algorithm.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. 
This is an arbitrary\n string used to differentiate different\n keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256\n bit key.\n customAlphabet:\n type: string\n x-dcl-go-name: CustomAlphabet\n description: 'This is supported by mapping\n these to the alphanumeric characters that\n the FFX mode natively supports. This happens\n before/after encryption/decryption. Each\n character listed must appear only once.\n Number of characters must be in the range\n [2, 95]. This must be encoded as ASCII.\n The order of characters does not matter.\n The full list of allowed characters is:\n ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n ~`!@#$%^&*()_-+={[}]|:;\"''<,>.?/``'\n x-dcl-conflicts:\n - commonAlphabet\n - radix\n radix:\n type: integer\n format: int64\n x-dcl-go-name: Radix\n description: The native way to select the\n alphabet. Must be in the range [2, 95].\n x-dcl-conflicts:\n - commonAlphabet\n - customAlphabet\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType\n description: 'The custom infoType to annotate\n the surrogate with. 
This annotation will\n be applied to the surrogate by prefixing\n it with the name of the custom infoType\n followed by the number of characters comprising\n the surrogate. The following scheme defines\n the format: info_type_name(surrogate_character_count):surrogate\n For example, if the name of custom infoType\n is ''MY_TOKEN_INFO_TYPE'' and the surrogate\n is ''abc'', the full replacement value\n will be: ''MY_TOKEN_INFO_TYPE(3):abc''\n This annotation identifies the surrogate\n when inspecting content using the custom\n infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\n This facilitates reversal of the surrogate\n when it occurs in free text. In order\n for inspection to work properly, the name\n of this infoType must not occur naturally\n anywhere in your data; otherwise, inspection\n may find a surrogate that does not correspond\n to an actual identifier. Therefore, choose\n your custom infoType name carefully after\n considering what your data looks like.\n One way to select a name that has a high\n chance of yielding reliable detection\n is to include one or more unicode characters\n that are highly improbable to exist in\n your data. For example, assuming your\n data is entered from a regular ASCII keyboard,\n the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information\n type. Either a name of your choosing\n when creating a CustomInfoType, or\n one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When\n sending Cloud DLP results to Data\n Catalog, infoType names should conform\n to the pattern `[A-Za-z0-9$-_]{1,64}`.\n dateShiftConfig:\n type: object\n x-dcl-go-name: DateShiftConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig\n description: Date Shift\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - cryptoDeterministicConfig\n required:\n - upperBoundDays\n - lowerBoundDays\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext\n description: Points to the field that contains\n the context, for example, an entity id.\n If set, must also set cryptoKey. If set,\n shift will be consistent for the given\n context.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey\n description: Causes the shift to be computed\n based on this key and the context. This\n results in the same shift for the same\n context and crypto_key. If set, must also\n set context. 
Can only be applied to table\n items.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud\n KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource\n name of the KMS CryptoKey to use\n for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped\n data crypto key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of\n the key. This is an arbitrary\n string used to differentiate different\n keys. 
A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated\n key if their names are the same.\n When the data crypto key is generated,\n this name is not used in any way\n (repeating the api call will result\n in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256\n bit key.\n lowerBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: LowerBoundDays\n description: Required. For example, -5 means\n shift date to at most 5 days back in the\n past.\n upperBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: UpperBoundDays\n description: Required. Range of shift in\n days. Actual shift will be selected at\n random within this range (inclusive ends).\n Negative means shift to earlier in time.\n Must not be more than 365250 days (1000\n years) each direction. 
For example, 3\n means shift date to at most 3 days into\n the future.\n fixedSizeBucketingConfig:\n type: object\n x-dcl-go-name: FixedSizeBucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig\n description: Fixed size bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - lowerBound\n - upperBound\n - bucketSize\n properties:\n bucketSize:\n type: number\n format: double\n x-dcl-go-name: BucketSize\n description: 'Required. Size of each bucket\n (except for minimum and maximum buckets).\n So if `lower_bound` = 10, `upper_bound`\n = 89, and `bucket_size` = 10, then the\n following buckets would be used: -10,\n 10-20, 20-30, 30-40, 40-50, 50-60, 60-70,\n 70-80, 80-89, 89+. Precision up to 2 decimals\n works.'\n lowerBound:\n type: object\n x-dcl-go-name: LowerBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound\n description: Required. Lower bound value\n of buckets. 
All values less than `lower_bound`\n are grouped together into a single bucket;\n for example if `lower_bound` = 10, then\n all values less than 10 are replaced with\n the value \"-10\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n upperBound:\n type: object\n x-dcl-go-name: UpperBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound\n description: Required. Upper bound value\n of buckets. 
All values greater than upper_bound\n are grouped together into a single bucket;\n for example if `upper_bound` = 89, then\n all values greater than 89 are replaced\n with the value \"89+\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n redactConfig:\n type: object\n x-dcl-go-name: RedactConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig\n description: Redact\n x-dcl-conflicts:\n - replaceConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n replaceConfig:\n type: object\n x-dcl-go-name: ReplaceConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig\n description: Replace with a specified value.\n x-dcl-conflicts:\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n newValue:\n type: object\n x-dcl-go-name: NewValue\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue\n description: Value to replace it with.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24\n hour format. Should be from 0\n to 23. 
An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replaceWithInfoTypeConfig:\n type: object\n x-dcl-go-name: ReplaceWithInfoTypeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig\n description: Replace with infotype\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n timePartConfig:\n type: object\n x-dcl-go-name: TimePartConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig\n description: Time extraction\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n partToExtract:\n type: string\n x-dcl-go-name: 
PartToExtract\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtractEnum\n description: 'The part of the time to keep.\n Possible values: TIME_PART_UNSPECIFIED,\n YEAR, MONTH, DAY_OF_MONTH, DAY_OF_WEEK,\n WEEK_OF_YEAR, HOUR_OF_DAY'\n enum:\n - TIME_PART_UNSPECIFIED\n - YEAR\n - MONTH\n - DAY_OF_MONTH\n - DAY_OF_WEEK\n - WEEK_OF_YEAR\n - HOUR_OF_DAY\n primitiveTransformation:\n type: object\n x-dcl-go-name: PrimitiveTransformation\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation\n description: Apply the transformation to the entire field.\n x-dcl-conflicts:\n - infoTypeTransformations\n properties:\n bucketingConfig:\n type: object\n x-dcl-go-name: BucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig\n description: Bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n buckets:\n type: array\n x-dcl-go-name: Buckets\n description: Set of buckets. 
Ranges must be non-overlapping.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets\n required:\n - replacementValue\n properties:\n max:\n type: object\n x-dcl-go-name: Max\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax\n description: Upper bound of the range, exclusive;\n type must match min.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. 
An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n min:\n type: object\n x-dcl-go-name: Min\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin\n description: Lower bound of the range, inclusive.\n Type should be the same as max if used.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue\n description: 
time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replacementValue:\n type: object\n x-dcl-go-name: ReplacementValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue\n description: Required. 
Replacement value for\n this bucket.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be\n from 1 to 31 and valid for the year\n and month, or 0 to specify a year\n by itself or a year and month where\n the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be\n from 1 to 12, or 0 to specify a year\n without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour\n format. Should be from 0 to 23. 
An\n API may choose to allow the value\n \"24:00:00\" for scenarios like business\n closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day.\n Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in\n nanoseconds. Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the\n time. Must normally be from 0 to 59.\n An API may allow the value 60 if it\n allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n characterMaskConfig:\n type: object\n x-dcl-go-name: CharacterMaskConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig\n description: Mask\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n charactersToIgnore:\n type: array\n x-dcl-go-name: CharactersToIgnore\n description: When masking a string, items in this\n list will be skipped when replacing characters.\n For example, if the input string is `555-555-5555`\n and you instruct Cloud DLP to skip `-` and mask\n 5 characters with `*`, Cloud DLP returns `***-**5-5555`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore\n properties:\n charactersToSkip:\n type: string\n x-dcl-go-name: CharactersToSkip\n description: Characters to 
not transform when\n masking.\n x-dcl-conflicts:\n - commonCharactersToIgnore\n commonCharactersToIgnore:\n type: string\n x-dcl-go-name: CommonCharactersToIgnore\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnoreEnum\n description: 'Common characters to not transform\n when masking. Useful to avoid removing punctuation.\n Possible values: COMMON_CHARS_TO_IGNORE_UNSPECIFIED,\n NUMERIC, ALPHA_UPPER_CASE, ALPHA_LOWER_CASE,\n PUNCTUATION, WHITESPACE'\n x-dcl-conflicts:\n - charactersToSkip\n enum:\n - COMMON_CHARS_TO_IGNORE_UNSPECIFIED\n - NUMERIC\n - ALPHA_UPPER_CASE\n - ALPHA_LOWER_CASE\n - PUNCTUATION\n - WHITESPACE\n maskingCharacter:\n type: string\n x-dcl-go-name: MaskingCharacter\n description: Character to use to mask the sensitive\n values—for example, `*` for an alphabetic string\n such as a name, or `0` for a numeric string such\n as ZIP code or credit card number. This string must\n have a length of 1. If not supplied, this value\n defaults to `*` for strings, and `0` for digits.\n numberToMask:\n type: integer\n format: int64\n x-dcl-go-name: NumberToMask\n description: Number of characters to mask. If not\n set, all matching chars will be masked. Skipped\n characters do not count towards this tally.\n reverseOrder:\n type: boolean\n x-dcl-go-name: ReverseOrder\n description: Mask characters in reverse order. For\n example, if `masking_character` is `0`, `number_to_mask`\n is `14`, and `reverse_order` is `false`, then the\n input string `1234-5678-9012-3456` is masked as\n `00000000000000-3456`. 
If `masking_character` is\n `*`, `number_to_mask` is `3`, and `reverse_order`\n is `true`, then the string `12345` is masked as\n `12***`.\n cryptoDeterministicConfig:\n type: object\n x-dcl-go-name: CryptoDeterministicConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig\n description: Deterministic Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext\n description: 'A context may be used for higher security\n and maintaining referential integrity such that\n the same identifier in two different contexts will\n be given a distinct surrogate. The context is appended\n to plaintext value being encrypted. On decryption\n the provided context is validated against the value\n used during encryption. If a context was provided\n during encryption, same context must be provided\n during decryption as well. If the context is not\n set, plaintext would be used as is for encryption.\n If the context is set but: 1. there is no record\n present when transforming a given value or 2. the\n field is not present when transforming a given value,\n plaintext would be used as is for encryption. 
Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey\n description: The key used by the encryption function.\n For deterministic encryption using AES-SIV, the\n provided key is internally expanded to 64 bytes\n prior to use.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType\n description: 'The custom info type to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom info type followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format {info type name}({surrogate character\n count}):{surrogate} For example, if the name of\n custom info type is ''MY_TOKEN_INFO_TYPE'' and the\n surrogate is ''abc'', the full replacement value\n will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This annotation\n identifies the surrogate when inspecting content\n using the custom info type ''Surrogate''. This facilitates\n reversal of the surrogate when it occurs in free\n text. Note: For record transformations where the\n entire cell in a table is being transformed, surrogates\n are not mandatory. Surrogates are used to denote\n the location of the token and are necessary for\n re-identification in free form text. 
In order for\n inspection to work properly, the name of this info\n type must not occur naturally anywhere in your data;\n otherwise, inspection may either - reverse a surrogate\n that does not correspond to an actual identifier\n - be unable to parse the surrogate and result in\n an error Therefore, choose your custom info type\n name carefully after considering what your data\n looks like. One way to select a name that has a\n high chance of yielding reliable detection is to\n include one or more unicode characters that are\n highly improbable to exist in your data. For example,\n assuming your data is entered from a regular ASCII\n keyboard, the symbol with the hex code point 29DD\n might be used like so: ⧝MY_TOKEN_TYPE.'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. 
When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n cryptoHashConfig:\n type: object\n x-dcl-go-name: CryptoHashConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig\n description: Crypto\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey\n description: The key used by the hash function.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. 
This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n cryptoReplaceFfxFpeConfig:\n type: object\n x-dcl-go-name: CryptoReplaceFfxFpeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig\n description: Ffx-Fpe\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - cryptoKey\n properties:\n commonAlphabet:\n type: string\n x-dcl-go-name: CommonAlphabet\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabetEnum\n description: 'Common alphabets. 
Possible values: FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED,\n NUMERIC, HEXADECIMAL, UPPER_CASE_ALPHA_NUMERIC,\n ALPHA_NUMERIC'\n x-dcl-conflicts:\n - customAlphabet\n - radix\n enum:\n - FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED\n - NUMERIC\n - HEXADECIMAL\n - UPPER_CASE_ALPHA_NUMERIC\n - ALPHA_NUMERIC\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext\n description: 'The ''tweak'', a context may be used\n for higher security since the same identifier in\n two different contexts won''t be given the same\n surrogate. If the context is not set, a default\n tweak will be used. If the context is set but: 1.\n there is no record present when transforming a given\n value or 1. the field is not present when transforming\n a given value, a default tweak will be used. Note\n that case (1) is expected when an `InfoTypeTransformation`\n is applied to both structured and non-structured\n `ContentItem`s. Currently, the referenced field\n may be of value type integer or string. The tweak\n is constructed as a sequence of bytes in big endian\n byte order such that: - a 64 bit integer is encoded\n followed by a single byte of value 1 - a string\n is encoded in UTF-8 format followed by a single\n byte of value 2'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey\n description: Required. 
The key used by the encryption\n algorithm.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. 
A 128/192/256 bit key.\n customAlphabet:\n type: string\n x-dcl-go-name: CustomAlphabet\n description: 'This is supported by mapping these to\n the alphanumeric characters that the FFX mode natively\n supports. This happens before/after encryption/decryption.\n Each character listed must appear only once. Number\n of characters must be in the range [2, 95]. This\n must be encoded as ASCII. The order of characters\n does not matter. The full list of allowed characters\n is: ``0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n ~`!@#$%^&*()_-+={[}]|:;\"''<,>.?/``'\n x-dcl-conflicts:\n - commonAlphabet\n - radix\n radix:\n type: integer\n format: int64\n x-dcl-go-name: Radix\n description: The native way to select the alphabet.\n Must be in the range [2, 95].\n x-dcl-conflicts:\n - commonAlphabet\n - customAlphabet\n surrogateInfoType:\n type: object\n x-dcl-go-name: SurrogateInfoType\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType\n description: 'The custom infoType to annotate the\n surrogate with. This annotation will be applied\n to the surrogate by prefixing it with the name of\n the custom infoType followed by the number of characters\n comprising the surrogate. The following scheme defines\n the format: info_type_name(surrogate_character_count):surrogate\n For example, if the name of custom infoType is ''MY_TOKEN_INFO_TYPE''\n and the surrogate is ''abc'', the full replacement\n value will be: ''MY_TOKEN_INFO_TYPE(3):abc'' This\n annotation identifies the surrogate when inspecting\n content using the custom infoType [`SurrogateType`](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\n This facilitates reversal of the surrogate when\n it occurs in free text. 
In order for inspection\n to work properly, the name of this infoType must\n not occur naturally anywhere in your data; otherwise,\n inspection may find a surrogate that does not correspond\n to an actual identifier. Therefore, choose your\n custom infoType name carefully after considering\n what your data looks like. One way to select a name\n that has a high chance of yielding reliable detection\n is to include one or more unicode characters that\n are highly improbable to exist in your data. For\n example, assuming your data is entered from a regular\n ASCII keyboard, the symbol with the hex code point\n 29DD might be used like so: ⧝MY_TOKEN_TYPE'\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the information type. Either\n a name of your choosing when creating a CustomInfoType,\n or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference\n when specifying a built-in type. When sending\n Cloud DLP results to Data Catalog, infoType\n names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.\n dateShiftConfig:\n type: object\n x-dcl-go-name: DateShiftConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig\n description: Date Shift\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - cryptoDeterministicConfig\n required:\n - upperBoundDays\n - lowerBoundDays\n properties:\n context:\n type: object\n x-dcl-go-name: Context\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext\n description: Points to the field that contains the\n context, for example, an entity id. If set, must\n also set cryptoKey. 
If set, shift will be consistent\n for the given context.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n cryptoKey:\n type: object\n x-dcl-go-name: CryptoKey\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey\n description: Causes the shift to be computed based\n on this key and the context. This results in the\n same shift for the same context and crypto_key.\n If set, must also set context. Can only be applied\n to table items.\n properties:\n kmsWrapped:\n type: object\n x-dcl-go-name: KmsWrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped\n description: Key wrapped using Cloud KMS\n x-dcl-conflicts:\n - transient\n - unwrapped\n required:\n - wrappedKey\n - cryptoKeyName\n properties:\n cryptoKeyName:\n type: string\n x-dcl-go-name: CryptoKeyName\n description: Required. The resource name of\n the KMS CryptoKey to use for unwrapping.\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: name\n wrappedKey:\n type: string\n x-dcl-go-name: WrappedKey\n description: Required. The wrapped data crypto\n key.\n transient:\n type: object\n x-dcl-go-name: Transient\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient\n description: Transient crypto key\n x-dcl-conflicts:\n - unwrapped\n - kmsWrapped\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. Name of the key. This\n is an arbitrary string used to differentiate\n different keys. A unique key is generated\n per name: two separate `TransientCryptoKey`\n protos share the same generated key if their\n names are the same. 
When the data crypto\n key is generated, this name is not used\n in any way (repeating the api call will\n result in a different key being generated).'\n unwrapped:\n type: object\n x-dcl-go-name: Unwrapped\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped\n description: Unwrapped crypto key\n x-dcl-conflicts:\n - transient\n - kmsWrapped\n required:\n - key\n properties:\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A 128/192/256 bit key.\n lowerBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: LowerBoundDays\n description: Required. For example, -5 means shift\n date to at most 5 days back in the past.\n upperBoundDays:\n type: integer\n format: int64\n x-dcl-go-name: UpperBoundDays\n description: Required. Range of shift in days. Actual\n shift will be selected at random within this range\n (inclusive ends). Negative means shift to earlier\n in time. Must not be more than 365250 days (1000\n years) each direction. For example, 3 means shift\n date to at most 3 days into the future.\n fixedSizeBucketingConfig:\n type: object\n x-dcl-go-name: FixedSizeBucketingConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig\n description: Fixed size bucketing\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n required:\n - lowerBound\n - upperBound\n - bucketSize\n properties:\n bucketSize:\n type: number\n format: double\n x-dcl-go-name: BucketSize\n description: 'Required. Size of each bucket (except\n for minimum and maximum buckets). 
So if `lower_bound`\n = 10, `upper_bound` = 89, and `bucket_size` = 10,\n then the following buckets would be used: -10, 10-20,\n 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89,\n 89+. Precision up to 2 decimals works.'\n lowerBound:\n type: object\n x-dcl-go-name: LowerBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound\n description: Required. Lower bound value of buckets.\n All values less than `lower_bound` are grouped together\n into a single bucket; for example if `lower_bound`\n = 10, then all values less than 10 are replaced\n with the value \"-10\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n upperBound:\n type: object\n x-dcl-go-name: UpperBound\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound\n description: Required. Upper bound value of buckets.\n All values greater than upper_bound are grouped\n together into a single bucket; for example if `upper_bound`\n = 89, then all values greater than 89 are replaced\n with the value \"89+\".\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. 
Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue\n 
description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n redactConfig:\n type: object\n x-dcl-go-name: RedactConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig\n description: Redact\n x-dcl-conflicts:\n - replaceConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n replaceConfig:\n type: object\n x-dcl-go-name: ReplaceConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig\n description: Replace with a specified value.\n x-dcl-conflicts:\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - 
bucketingConfig\n - replaceWithInfoTypeConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n newValue:\n type: object\n x-dcl-go-name: NewValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue\n description: Value to replace it with.\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must be from\n 1 to 31 and valid for the year and month,\n or 0 to specify a year by itself or a year\n and month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. Must be from\n 1 to 12, or 0 to specify a year without\n a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. 
Must be from\n 1 to 9999, or 0 to specify a date without\n a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValueEnum\n description: 'day of week Possible values: DAY_OF_WEEK_UNSPECIFIED,\n MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY,\n SATURDAY, SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 24 hour format.\n Should be from 0 to 23. 
An API may choose\n to allow the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of day. Must\n be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds in nanoseconds.\n Must be from 0 to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes of the time.\n Must normally be from 0 to 59. An API may\n allow the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n replaceWithInfoTypeConfig:\n type: object\n x-dcl-go-name: ReplaceWithInfoTypeConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig\n description: Replace with infotype\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - timePartConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n x-dcl-send-empty: true\n timePartConfig:\n type: object\n x-dcl-go-name: TimePartConfig\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig\n description: Time extraction\n x-dcl-conflicts:\n - replaceConfig\n - redactConfig\n - characterMaskConfig\n - cryptoReplaceFfxFpeConfig\n - fixedSizeBucketingConfig\n - bucketingConfig\n - replaceWithInfoTypeConfig\n - cryptoHashConfig\n - dateShiftConfig\n - cryptoDeterministicConfig\n properties:\n partToExtract:\n type: string\n x-dcl-go-name: PartToExtract\n x-dcl-go-type: 
DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtractEnum\n description: 'The part of the time to keep. Possible\n values: TIME_PART_UNSPECIFIED, YEAR, MONTH, DAY_OF_MONTH,\n DAY_OF_WEEK, WEEK_OF_YEAR, HOUR_OF_DAY'\n enum:\n - TIME_PART_UNSPECIFIED\n - YEAR\n - MONTH\n - DAY_OF_MONTH\n - DAY_OF_WEEK\n - WEEK_OF_YEAR\n - HOUR_OF_DAY\n recordSuppressions:\n type: array\n x-dcl-go-name: RecordSuppressions\n description: Configuration defining which records get suppressed\n entirely. Records that match any suppression rule are omitted\n from the output.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions\n properties:\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition\n description: A condition that when it evaluates to true will\n result in the record being evaluated to be suppressed from\n the transformed content.\n properties:\n expressions:\n type: object\n x-dcl-go-name: Expressions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions\n description: An expression.\n properties:\n conditions:\n type: object\n x-dcl-go-name: Conditions\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions\n description: Conditions to apply to the expression.\n properties:\n conditions:\n type: array\n x-dcl-go-name: Conditions\n description: A collection of conditions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions\n required:\n - field\n - operator\n properties:\n field:\n type: object\n x-dcl-go-name: Field\n 
x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField\n description: Required. Field within the\n record this condition is evaluated against.\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name describing the field.\n operator:\n type: string\n x-dcl-go-name: Operator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperatorEnum\n description: 'Required. Operator used to\n compare the field or infoType to the value.\n Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n value:\n type: object\n x-dcl-go-name: Value\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue\n description: Value to compare against. [Mandatory,\n except for `EXISTS` tests.]\n properties:\n booleanValue:\n type: boolean\n x-dcl-go-name: BooleanValue\n description: boolean\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n dateValue:\n type: object\n x-dcl-go-name: DateValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue\n description: date\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dayOfWeekValue\n properties:\n day:\n type: integer\n format: int64\n x-dcl-go-name: Day\n description: Day of a month. Must\n be from 1 to 31 and valid for\n the year and month, or 0 to specify\n a year by itself or a year and\n month where the day isn't significant.\n month:\n type: integer\n format: int64\n x-dcl-go-name: Month\n description: Month of a year. 
Must\n be from 1 to 12, or 0 to specify\n a year without a month and day.\n year:\n type: integer\n format: int64\n x-dcl-go-name: Year\n description: Year of the date. Must\n be from 1 to 9999, or 0 to specify\n a date without a year.\n dayOfWeekValue:\n type: string\n x-dcl-go-name: DayOfWeekValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValueEnum\n description: 'day of week Possible values:\n DAY_OF_WEEK_UNSPECIFIED, MONDAY, TUESDAY,\n WEDNESDAY, THURSDAY, FRIDAY, SATURDAY,\n SUNDAY'\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n enum:\n - DAY_OF_WEEK_UNSPECIFIED\n - MONDAY\n - TUESDAY\n - WEDNESDAY\n - THURSDAY\n - FRIDAY\n - SATURDAY\n - SUNDAY\n floatValue:\n type: number\n format: double\n x-dcl-go-name: FloatValue\n description: float\n x-dcl-conflicts:\n - integerValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n integerValue:\n type: integer\n format: int64\n x-dcl-go-name: IntegerValue\n description: integer\n x-dcl-conflicts:\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n stringValue:\n type: string\n x-dcl-go-name: StringValue\n description: string\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - booleanValue\n - timestampValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n timeValue:\n type: object\n x-dcl-go-name: TimeValue\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue\n description: time of day\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timestampValue\n - dateValue\n - dayOfWeekValue\n properties:\n hours:\n type: integer\n format: int64\n x-dcl-go-name: Hours\n description: Hours of day in 
24\n hour format. Should be from 0\n to 23. An API may choose to allow\n the value \"24:00:00\" for scenarios\n like business closing time.\n minutes:\n type: integer\n format: int64\n x-dcl-go-name: Minutes\n description: Minutes of hour of\n day. Must be from 0 to 59.\n nanos:\n type: integer\n format: int64\n x-dcl-go-name: Nanos\n description: Fractions of seconds\n in nanoseconds. Must be from 0\n to 999,999,999.\n seconds:\n type: integer\n format: int64\n x-dcl-go-name: Seconds\n description: Seconds of minutes\n of the time. Must normally be\n from 0 to 59. An API may allow\n the value 60 if it allows leap-seconds.\n timestampValue:\n type: string\n format: date-time\n x-dcl-go-name: TimestampValue\n description: timestamp\n x-dcl-conflicts:\n - integerValue\n - floatValue\n - stringValue\n - booleanValue\n - timeValue\n - dateValue\n - dayOfWeekValue\n logicalOperator:\n type: string\n x-dcl-go-name: LogicalOperator\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperatorEnum\n description: 'The operator to apply to the result\n of conditions. Default and currently only supported\n value is `AND`. Possible values: LOGICAL_OPERATOR_UNSPECIFIED,\n AND'\n enum:\n - LOGICAL_OPERATOR_UNSPECIFIED\n - AND\n transformationErrorHandling:\n type: object\n x-dcl-go-name: TransformationErrorHandling\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigTransformationErrorHandling\n description: Mode for handling transformation errors. 
If left unspecified,\n the default mode is `TransformationErrorHandling.ThrowError`.\n properties:\n leaveUntransformed:\n type: object\n x-dcl-go-name: LeaveUntransformed\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigTransformationErrorHandlingLeaveUntransformed\n description: Ignore errors\n x-dcl-conflicts:\n - throwError\n throwError:\n type: object\n x-dcl-go-name: ThrowError\n x-dcl-go-type: DeidentifyTemplateDeidentifyConfigTransformationErrorHandlingThrowError\n description: Throw an error\n x-dcl-conflicts:\n - leaveUntransformed\n description:\n type: string\n x-dcl-go-name: Description\n description: Short description (max 256 chars).\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Display name (max 256 chars).\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of the resource\n x-kubernetes-immutable: true\n locationId:\n type: string\n x-dcl-go-name: LocationId\n readOnly: true\n description: Output only. The geographic location where this resource is\n stored.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The template name. The template will have one\n of the following formats: `projects/PROJECT_ID/deidentifyTemplates/TEMPLATE_ID`\n OR `organizations/ORGANIZATION_ID/deidentifyTemplates/TEMPLATE_ID`'\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n parent:\n type: string\n x-dcl-go-name: Parent\n description: The parent of the resource\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The last update timestamp of an inspectTemplate.\n x-kubernetes-immutable: true\n") -// 424961 bytes -// MD5: 714cc1501bac97758bcd52efffc78085 +// 424958 bytes +// MD5: ec4166bd07d1f40b0e519c052e742b17 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_internal.go index 7a2d667b05..4ba046d319 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_internal.go @@ -2097,11 +2097,8 @@ func (op *createDeidentifyTemplateOperation) do(ctx context.Context, r *Deidenti op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetDeidentifyTemplate(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_schema.go index ecb61447ac..d2eb467e0f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/deidentify_template_schema.go @@ -943,7 +943,7 @@ func DCLDeidentifyTemplateSchema() 
*dcl.Schema { Type: "object", GoName: "SurrogateInfoType", GoType: "DeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType", - Description: "The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.", + Description: "The custom info type to annotate the surrogate with. 
This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.", Properties: map[string]*dcl.Property{ "name": &dcl.Property{ Type: "string", @@ -3143,7 +3143,7 @@ func DCLDeidentifyTemplateSchema() *dcl.Schema { Type: "object", GoName: "SurrogateInfoType", GoType: "DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType", - Description: "The custom info type to annotate the surrogate with. 
This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.", + Description: "The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. 
The following scheme defines the format {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.", Properties: map[string]*dcl.Property{ "name": &dcl.Property{ Type: "string", @@ -5007,7 +5007,7 @@ func DCLDeidentifyTemplateSchema() *dcl.Schema { Type: "object", GoName: "SurrogateInfoType", GoType: "DeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType", - Description: "The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. 
The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.", + Description: "The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format {info type name}({surrogate character count}):{surrogate} For example, if the name of custom info type is 'MY_TOKEN_INFO_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY_TOKEN_INFO_TYPE(3):abc' This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. 
This facilitates reversal of the surrogate when it occurs in free text. Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - reverse a surrogate that does not correspond to an actual identifier - be unable to parse the surrogate and result in an error Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY_TOKEN_TYPE.", Properties: map[string]*dcl.Property{ "name": &dcl.Property{ Type: "string", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template.go index d6292ba314..d900eb53e5 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template.go @@ -1649,10 +1649,9 @@ func (c *Client) GetInspectTemplate(ctx context.Context, r *InspectTemplate) (*I if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, 
"Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template_internal.go index 1ea442413f..151c791160 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/inspect_template_internal.go @@ -493,11 +493,8 @@ func (op *createInspectTemplateOperation) do(ctx context.Context, r *InspectTemp op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetInspectTemplate(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger.go index 87db8ab3cb..022fbaa873 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger.go @@ -3730,10 +3730,9 @@ func (c *Client) GetJobTrigger(ctx context.Context, r *JobTrigger) (*JobTrigger, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Parent = 
nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger_internal.go index dcdb7714f7..377d0ec540 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/job_trigger_internal.go @@ -801,11 +801,8 @@ func (op *createJobTriggerOperation) do(ctx context.Context, r *JobTrigger, c *C op.response = o // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetJobTrigger(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type.go index 2dfd9de81b..3d7079db73 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type.go @@ -647,10 +647,9 @@ func (c *Client) GetStoredInfoType(ctx context.Context, r *StoredInfoType) (*Sto if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type_internal.go index cfc2969f54..f58e17eedb 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/beta/stored_info_type_internal.go @@ -393,11 +393,8 
@@ func (op *createStoredInfoTypeOperation) do(ctx context.Context, r *StoredInfoTy op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetStoredInfoType(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/beta/trigger.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/beta/trigger.go index e372369bca..c9e50b65ea 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/beta/trigger.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/beta/trigger.go @@ -473,10 +473,9 @@ func (c *Client) GetTrigger(ctx context.Context, r *Trigger) (*Trigger, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/backup.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/backup.go index 047e5b782d..3c63009c4e 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/backup.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/backup.go @@ -215,10 +215,9 @@ func (c *Client) GetBackup(ctx context.Context, r *Backup) (*Backup, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/instance.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/instance.go index 1480a03c58..cbf150967a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/instance.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta/instance.go @@ -464,10 +464,9 @@ func (c *Client) GetInstance(ctx context.Context, r *Instance) (*Instance, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/release.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/release.go index a22b482a04..971d121ff2 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/release.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/release.go @@ -146,9 +146,8 @@ func (c *Client) GetRelease(ctx context.Context, r *Release) (*Release, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset.go index e27caafcb9..f795efe205 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset.go @@ -320,9 +320,8 @@ func (c *Client) GetRuleset(ctx context.Context, r *Ruleset) (*Ruleset, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset_internal.go index 3697821742..ccf79ffd2a 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta/ruleset_internal.go @@ -248,11 +248,8 @@ func (op *createRulesetOperation) do(ctx context.Context, r *Ruleset, c *Client) op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetRuleset(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gameservices/realm.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gameservices/realm.go index 9de1839f17..0884cf8993 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gameservices/realm.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gameservices/realm.go @@ -151,10 +151,9 @@ func (c *Client) GetRealm(ctx context.Context, r *Realm) (*Realm, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/feature.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/feature.go index 5c3636accb..d993372073 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/feature.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/feature.go @@ -450,10 +450,9 @@ func (c *Client) GetFeature(ctx context.Context, r *Feature) (*Feature, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.go index 64710458cf..212f93231a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.go @@ -681,10 +681,9 @@ func (c *Client) GetMembership(ctx context.Context, r *Membership) (*Membership, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.yaml index addb1e7070..5ef3b68312 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership.yaml @@ -104,12 +104,12 @@ components: type: string x-dcl-go-name: WorkloadIdentityPool readOnly: true - description: 'Output only. The name of the workload identity pool in + description: Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to - that Hub. For a Hub hosted in: {PROJECT_ID}, the workload pool format + that Hub. For a Hub hosted in {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change - in newer versions of this API.' + in newer versions of this API. 
createTime: type: string format: date-time diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_beta_yaml_embed.go index 60047170be..02b66e736e 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/gkehub/beta/membership.yaml -var YAML_membership = []byte("info:\n title: GkeHub/Membership\n description: The GkeHub Membership resource\n x-dcl-struct-name: Membership\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Membership\n parameters:\n - name: Membership\n required: true\n description: A full instance of a Membership\n apply:\n description: The function used to apply information about a Membership\n parameters:\n - name: Membership\n required: true\n description: A full instance of a Membership\n delete:\n description: The function used to delete a Membership\n parameters:\n - name: Membership\n required: true\n description: A full instance of a Membership\n deleteAll:\n description: The function used to delete all Membership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Membership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Membership:\n title: Membership\n x-dcl-id: 
projects/{{project}}/locations/{{location}}/memberships/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n authority:\n type: object\n x-dcl-go-name: Authority\n x-dcl-go-type: MembershipAuthority\n description: 'Optional. How to identify workloads from this Membership.\n See the documentation on Workload Identity for more details: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity'\n properties:\n identityProvider:\n type: string\n x-dcl-go-name: IdentityProvider\n readOnly: true\n description: Output only. An identity provider that reflects the `issuer`\n in the workload identity pool.\n issuer:\n type: string\n x-dcl-go-name: Issuer\n description: Optional. A JSON Web Token (JWT) issuer URI. `issuer` must\n start with `https://` and be a valid URL with length <2000 characters.\n If set, then Google will allow valid OIDC tokens from this issuer\n to authenticate within the workload_identity_pool. OIDC discovery\n will be performed on this URI to validate tokens from the issuer.\n Clearing `issuer` disables Workload Identity. `issuer` cannot be directly\n modified; it must be cleared (and Workload Identity disabled) before\n using a new issuer (and re-enabling Workload Identity).\n workloadIdentityPool:\n type: string\n x-dcl-go-name: WorkloadIdentityPool\n readOnly: true\n description: 'Output only. The name of the workload identity pool in\n which `issuer` will be recognized. There is a single Workload Identity\n Pool per Hub that is shared between all Memberships that belong to\n that Hub. 
For a Hub hosted in: {PROJECT_ID}, the workload pool format\n is `{PROJECT_ID}.hub.id.goog`, although this is subject to change\n in newer versions of this API.'\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. When the Membership was created.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. When the Membership was deleted.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: 'Description of this membership, limited to 63 characters.\n Must match the regex: `*` This field is present for legacy purposes.'\n endpoint:\n type: object\n x-dcl-go-name: Endpoint\n x-dcl-go-type: MembershipEndpoint\n description: Optional. Endpoint information to reach this member.\n properties:\n gkeCluster:\n type: object\n x-dcl-go-name: GkeCluster\n x-dcl-go-type: MembershipEndpointGkeCluster\n description: Optional. GKE-specific information. Only present if this\n Membership is a GKE cluster.\n properties:\n resourceLink:\n type: string\n x-dcl-go-name: ResourceLink\n description: 'Immutable. Self-link of the GCP resource for the GKE\n cluster. For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster\n Zonal clusters are also supported.'\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n kubernetesMetadata:\n type: object\n x-dcl-go-name: KubernetesMetadata\n x-dcl-go-type: MembershipEndpointKubernetesMetadata\n readOnly: true\n description: Output only. Useful Kubernetes-specific metadata.\n properties:\n kubernetesApiServerVersion:\n type: string\n x-dcl-go-name: KubernetesApiServerVersion\n readOnly: true\n description: Output only. 
Kubernetes API server version string as\n reported by `/version`.\n memoryMb:\n type: integer\n format: int64\n x-dcl-go-name: MemoryMb\n readOnly: true\n description: Output only. The total memory capacity as reported\n by the sum of all Kubernetes nodes resources, defined in MB.\n nodeCount:\n type: integer\n format: int64\n x-dcl-go-name: NodeCount\n readOnly: true\n description: Output only. Node count as reported by Kubernetes nodes\n resources.\n nodeProviderId:\n type: string\n x-dcl-go-name: NodeProviderId\n readOnly: true\n description: Output only. Node providerID as reported by the first\n node in the list of nodes on the Kubernetes endpoint. On Kubernetes\n platforms that support zero-node clusters (like GKE-on-GCP), the\n node_count will be zero and the node_provider_id will be empty.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which these details were last\n updated. This update_time is different from the Membership-level\n update_time since EndpointDetails are updated internally for API\n consumers.\n vcpuCount:\n type: integer\n format: int64\n x-dcl-go-name: VcpuCount\n readOnly: true\n description: Output only. vCPU count as reported by Kubernetes nodes\n resources.\n kubernetesResource:\n type: object\n x-dcl-go-name: KubernetesResource\n x-dcl-go-type: MembershipEndpointKubernetesResource\n description: 'Optional. The in-cluster Kubernetes Resources that should\n be applied for a correctly registered cluster, in the steady state.\n These resources: * Ensure that the cluster is exclusively registered\n to one and only one Hub Membership. * Propagate Workload Pool Information\n available in the Membership Authority field. * Ensure proper initial\n configuration of default Hub Features.'\n properties:\n connectResources:\n type: array\n x-dcl-go-name: ConnectResources\n readOnly: true\n description: Output only. 
The Kubernetes resources for installing\n the GKE Connect agent This field is only populated in the Membership\n returned from a successful long-running operation from CreateMembership\n or UpdateMembership. It is not populated during normal GetMembership\n or ListMemberships requests. To get the resource manifest after\n the initial registration, the caller should make a UpdateMembership\n call with an empty field mask.\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: MembershipEndpointKubernetesResourceConnectResources\n properties:\n clusterScoped:\n type: boolean\n x-dcl-go-name: ClusterScoped\n description: Whether the resource provided in the manifest\n is `cluster_scoped`. If unset, the manifest is assumed to\n be namespace scoped. This field is used for REST mapping\n when applying the resource in a cluster.\n manifest:\n type: string\n x-dcl-go-name: Manifest\n description: YAML manifest of the resource.\n membershipCrManifest:\n type: string\n x-dcl-go-name: MembershipCrManifest\n description: Input only. The YAML representation of the Membership\n CR. This field is ignored for GKE clusters where Hub can read\n the CR directly. Callers should provide the CR that is currently\n present in the cluster during CreateMembership or UpdateMembership,\n or leave this field empty if none exists. The CR manifest is used\n to validate the cluster has not been registered with another Membership.\n x-dcl-mutable-unreadable: true\n membershipResources:\n type: array\n x-dcl-go-name: MembershipResources\n readOnly: true\n description: Output only. Additional Kubernetes resources that need\n to be applied to the cluster after Membership creation, and after\n every update. This field is only populated in the Membership returned\n from a successful long-running operation from CreateMembership\n or UpdateMembership. It is not populated during normal GetMembership\n or ListMemberships requests. 
To get the resource manifest after\n the initial registration, the caller should make a UpdateMembership\n call with an empty field mask.\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: MembershipEndpointKubernetesResourceMembershipResources\n properties:\n clusterScoped:\n type: boolean\n x-dcl-go-name: ClusterScoped\n description: Whether the resource provided in the manifest\n is `cluster_scoped`. If unset, the manifest is assumed to\n be namespace scoped. This field is used for REST mapping\n when applying the resource in a cluster.\n manifest:\n type: string\n x-dcl-go-name: Manifest\n description: YAML manifest of the resource.\n resourceOptions:\n type: object\n x-dcl-go-name: ResourceOptions\n x-dcl-go-type: MembershipEndpointKubernetesResourceResourceOptions\n description: Optional. Options for Kubernetes resource generation.\n properties:\n connectVersion:\n type: string\n x-dcl-go-name: ConnectVersion\n description: Optional. The Connect agent version to use for\n connect_resources. Defaults to the latest GKE Connect version.\n The version must be a currently supported version, obsolete\n versions will be rejected.\n v1beta1Crd:\n type: boolean\n x-dcl-go-name: V1Beta1Crd\n description: Optional. Use `apiextensions/v1beta1` instead of\n `apiextensions/v1` for CustomResourceDefinition resources.\n This option should be set for clusters with Kubernetes apiserver\n versions <1.16.\n externalId:\n type: string\n x-dcl-go-name: ExternalId\n description: 'Optional. An externally-generated and managed ID for this\n Membership. This ID may be modified after creation, but this is not recommended.\n The ID must match the regex: `*` If this Membership represents a Kubernetes\n cluster, this value should be set to the UID of the `kube-system` namespace\n object.'\n infrastructureType:\n type: string\n x-dcl-go-name: InfrastructureType\n x-dcl-go-type: MembershipInfrastructureTypeEnum\n description: 'Optional. 
The infrastructure type this Membership is running\n on. Possible values: INFRASTRUCTURE_TYPE_UNSPECIFIED, ON_PREM, MULTI_CLOUD'\n enum:\n - INFRASTRUCTURE_TYPE_UNSPECIFIED\n - ON_PREM\n - MULTI_CLOUD\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. GCP labels for this membership.\n lastConnectionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastConnectionTime\n readOnly: true\n description: Output only. For clusters using Connect, the timestamp of the\n most recent connection established with Google Cloud. This time is updated\n every several minutes, not continuously. For clusters that do not use\n GKE Connect, or that have never connected successfully, this field will\n be unset.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The full, unique name of this Membership resource\n in the format `projects/*/locations/*/memberships/{membership_id}`, set\n during creation. `membership_id` must be a valid RFC 1123 compliant DNS\n label: 1. At most 63 characters in length 2. It must consist of lower\n case alphanumeric characters or `-` 3. It must start and end with an alphanumeric\n character Which can be expressed as the regex: `)?`, with a maximum length\n of 63 characters.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n state:\n type: object\n x-dcl-go-name: State\n x-dcl-go-type: MembershipState\n readOnly: true\n description: Output only. 
State of the Membership resource.\n x-kubernetes-immutable: true\n properties:\n code:\n type: string\n x-dcl-go-name: Code\n x-dcl-go-type: MembershipStateCodeEnum\n readOnly: true\n description: 'Output only. The current state of the Membership resource.\n Possible values: CODE_UNSPECIFIED, CREATING, READY, DELETING, UPDATING,\n SERVICE_UPDATING'\n x-kubernetes-immutable: true\n enum:\n - CODE_UNSPECIFIED\n - CREATING\n - READY\n - DELETING\n - UPDATING\n - SERVICE_UPDATING\n uniqueId:\n type: string\n x-dcl-go-name: UniqueId\n readOnly: true\n description: Output only. Google-generated UUID for this resource. This\n is unique across all Membership resources. If a Membership resource is\n deleted and another resource with the same name is created, it gets a\n different unique_id.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. When the Membership was last updated.\n x-kubernetes-immutable: true\n") +var YAML_membership = []byte("info:\n title: GkeHub/Membership\n description: The GkeHub Membership resource\n x-dcl-struct-name: Membership\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Membership\n parameters:\n - name: Membership\n required: true\n description: A full instance of a Membership\n apply:\n description: The function used to apply information about a Membership\n parameters:\n - name: Membership\n required: true\n description: A full instance of a Membership\n delete:\n description: The function used to delete a Membership\n parameters:\n - name: Membership\n required: true\n description: A full instance of a Membership\n deleteAll:\n description: The function used to delete all Membership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many 
Membership\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Membership:\n title: Membership\n x-dcl-id: projects/{{project}}/locations/{{location}}/memberships/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n authority:\n type: object\n x-dcl-go-name: Authority\n x-dcl-go-type: MembershipAuthority\n description: 'Optional. How to identify workloads from this Membership.\n See the documentation on Workload Identity for more details: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity'\n properties:\n identityProvider:\n type: string\n x-dcl-go-name: IdentityProvider\n readOnly: true\n description: Output only. An identity provider that reflects the `issuer`\n in the workload identity pool.\n issuer:\n type: string\n x-dcl-go-name: Issuer\n description: Optional. A JSON Web Token (JWT) issuer URI. `issuer` must\n start with `https://` and be a valid URL with length <2000 characters.\n If set, then Google will allow valid OIDC tokens from this issuer\n to authenticate within the workload_identity_pool. OIDC discovery\n will be performed on this URI to validate tokens from the issuer.\n Clearing `issuer` disables Workload Identity. `issuer` cannot be directly\n modified; it must be cleared (and Workload Identity disabled) before\n using a new issuer (and re-enabling Workload Identity).\n workloadIdentityPool:\n type: string\n x-dcl-go-name: WorkloadIdentityPool\n readOnly: true\n description: Output only. The name of the workload identity pool in\n which `issuer` will be recognized. 
There is a single Workload Identity\n Pool per Hub that is shared between all Memberships that belong to\n that Hub. For a Hub hosted in {PROJECT_ID}, the workload pool format\n is `{PROJECT_ID}.hub.id.goog`, although this is subject to change\n in newer versions of this API.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. When the Membership was created.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. When the Membership was deleted.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: 'Description of this membership, limited to 63 characters.\n Must match the regex: `*` This field is present for legacy purposes.'\n endpoint:\n type: object\n x-dcl-go-name: Endpoint\n x-dcl-go-type: MembershipEndpoint\n description: Optional. Endpoint information to reach this member.\n properties:\n gkeCluster:\n type: object\n x-dcl-go-name: GkeCluster\n x-dcl-go-type: MembershipEndpointGkeCluster\n description: Optional. GKE-specific information. Only present if this\n Membership is a GKE cluster.\n properties:\n resourceLink:\n type: string\n x-dcl-go-name: ResourceLink\n description: 'Immutable. Self-link of the GCP resource for the GKE\n cluster. For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster\n Zonal clusters are also supported.'\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n kubernetesMetadata:\n type: object\n x-dcl-go-name: KubernetesMetadata\n x-dcl-go-type: MembershipEndpointKubernetesMetadata\n readOnly: true\n description: Output only. Useful Kubernetes-specific metadata.\n properties:\n kubernetesApiServerVersion:\n type: string\n x-dcl-go-name: KubernetesApiServerVersion\n readOnly: true\n description: Output only. 
Kubernetes API server version string as\n reported by `/version`.\n memoryMb:\n type: integer\n format: int64\n x-dcl-go-name: MemoryMb\n readOnly: true\n description: Output only. The total memory capacity as reported\n by the sum of all Kubernetes nodes resources, defined in MB.\n nodeCount:\n type: integer\n format: int64\n x-dcl-go-name: NodeCount\n readOnly: true\n description: Output only. Node count as reported by Kubernetes nodes\n resources.\n nodeProviderId:\n type: string\n x-dcl-go-name: NodeProviderId\n readOnly: true\n description: Output only. Node providerID as reported by the first\n node in the list of nodes on the Kubernetes endpoint. On Kubernetes\n platforms that support zero-node clusters (like GKE-on-GCP), the\n node_count will be zero and the node_provider_id will be empty.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which these details were last\n updated. This update_time is different from the Membership-level\n update_time since EndpointDetails are updated internally for API\n consumers.\n vcpuCount:\n type: integer\n format: int64\n x-dcl-go-name: VcpuCount\n readOnly: true\n description: Output only. vCPU count as reported by Kubernetes nodes\n resources.\n kubernetesResource:\n type: object\n x-dcl-go-name: KubernetesResource\n x-dcl-go-type: MembershipEndpointKubernetesResource\n description: 'Optional. The in-cluster Kubernetes Resources that should\n be applied for a correctly registered cluster, in the steady state.\n These resources: * Ensure that the cluster is exclusively registered\n to one and only one Hub Membership. * Propagate Workload Pool Information\n available in the Membership Authority field. * Ensure proper initial\n configuration of default Hub Features.'\n properties:\n connectResources:\n type: array\n x-dcl-go-name: ConnectResources\n readOnly: true\n description: Output only. 
The Kubernetes resources for installing\n the GKE Connect agent This field is only populated in the Membership\n returned from a successful long-running operation from CreateMembership\n or UpdateMembership. It is not populated during normal GetMembership\n or ListMemberships requests. To get the resource manifest after\n the initial registration, the caller should make a UpdateMembership\n call with an empty field mask.\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: MembershipEndpointKubernetesResourceConnectResources\n properties:\n clusterScoped:\n type: boolean\n x-dcl-go-name: ClusterScoped\n description: Whether the resource provided in the manifest\n is `cluster_scoped`. If unset, the manifest is assumed to\n be namespace scoped. This field is used for REST mapping\n when applying the resource in a cluster.\n manifest:\n type: string\n x-dcl-go-name: Manifest\n description: YAML manifest of the resource.\n membershipCrManifest:\n type: string\n x-dcl-go-name: MembershipCrManifest\n description: Input only. The YAML representation of the Membership\n CR. This field is ignored for GKE clusters where Hub can read\n the CR directly. Callers should provide the CR that is currently\n present in the cluster during CreateMembership or UpdateMembership,\n or leave this field empty if none exists. The CR manifest is used\n to validate the cluster has not been registered with another Membership.\n x-dcl-mutable-unreadable: true\n membershipResources:\n type: array\n x-dcl-go-name: MembershipResources\n readOnly: true\n description: Output only. Additional Kubernetes resources that need\n to be applied to the cluster after Membership creation, and after\n every update. This field is only populated in the Membership returned\n from a successful long-running operation from CreateMembership\n or UpdateMembership. It is not populated during normal GetMembership\n or ListMemberships requests. 
To get the resource manifest after\n the initial registration, the caller should make a UpdateMembership\n call with an empty field mask.\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: MembershipEndpointKubernetesResourceMembershipResources\n properties:\n clusterScoped:\n type: boolean\n x-dcl-go-name: ClusterScoped\n description: Whether the resource provided in the manifest\n is `cluster_scoped`. If unset, the manifest is assumed to\n be namespace scoped. This field is used for REST mapping\n when applying the resource in a cluster.\n manifest:\n type: string\n x-dcl-go-name: Manifest\n description: YAML manifest of the resource.\n resourceOptions:\n type: object\n x-dcl-go-name: ResourceOptions\n x-dcl-go-type: MembershipEndpointKubernetesResourceResourceOptions\n description: Optional. Options for Kubernetes resource generation.\n properties:\n connectVersion:\n type: string\n x-dcl-go-name: ConnectVersion\n description: Optional. The Connect agent version to use for\n connect_resources. Defaults to the latest GKE Connect version.\n The version must be a currently supported version, obsolete\n versions will be rejected.\n v1beta1Crd:\n type: boolean\n x-dcl-go-name: V1Beta1Crd\n description: Optional. Use `apiextensions/v1beta1` instead of\n `apiextensions/v1` for CustomResourceDefinition resources.\n This option should be set for clusters with Kubernetes apiserver\n versions <1.16.\n externalId:\n type: string\n x-dcl-go-name: ExternalId\n description: 'Optional. An externally-generated and managed ID for this\n Membership. This ID may be modified after creation, but this is not recommended.\n The ID must match the regex: `*` If this Membership represents a Kubernetes\n cluster, this value should be set to the UID of the `kube-system` namespace\n object.'\n infrastructureType:\n type: string\n x-dcl-go-name: InfrastructureType\n x-dcl-go-type: MembershipInfrastructureTypeEnum\n description: 'Optional. 
The infrastructure type this Membership is running\n on. Possible values: INFRASTRUCTURE_TYPE_UNSPECIFIED, ON_PREM, MULTI_CLOUD'\n enum:\n - INFRASTRUCTURE_TYPE_UNSPECIFIED\n - ON_PREM\n - MULTI_CLOUD\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. GCP labels for this membership.\n lastConnectionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastConnectionTime\n readOnly: true\n description: Output only. For clusters using Connect, the timestamp of the\n most recent connection established with Google Cloud. This time is updated\n every several minutes, not continuously. For clusters that do not use\n GKE Connect, or that have never connected successfully, this field will\n be unset.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The full, unique name of this Membership resource\n in the format `projects/*/locations/*/memberships/{membership_id}`, set\n during creation. `membership_id` must be a valid RFC 1123 compliant DNS\n label: 1. At most 63 characters in length 2. It must consist of lower\n case alphanumeric characters or `-` 3. It must start and end with an alphanumeric\n character Which can be expressed as the regex: `)?`, with a maximum length\n of 63 characters.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n state:\n type: object\n x-dcl-go-name: State\n x-dcl-go-type: MembershipState\n readOnly: true\n description: Output only. 
State of the Membership resource.\n x-kubernetes-immutable: true\n properties:\n code:\n type: string\n x-dcl-go-name: Code\n x-dcl-go-type: MembershipStateCodeEnum\n readOnly: true\n description: 'Output only. The current state of the Membership resource.\n Possible values: CODE_UNSPECIFIED, CREATING, READY, DELETING, UPDATING,\n SERVICE_UPDATING'\n x-kubernetes-immutable: true\n enum:\n - CODE_UNSPECIFIED\n - CREATING\n - READY\n - DELETING\n - UPDATING\n - SERVICE_UPDATING\n uniqueId:\n type: string\n x-dcl-go-name: UniqueId\n readOnly: true\n description: Output only. Google-generated UUID for this resource. This\n is unique across all Membership resources. If a Membership resource is\n deleted and another resource with the same name is created, it gets a\n different unique_id.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. When the Membership was last updated.\n x-kubernetes-immutable: true\n") -// 17895 bytes -// MD5: 36374bf4024e5ab7bd88e041145d6fcc +// 17892 bytes +// MD5: a16e73f58683f9f44d0d1bb1cf1c764e diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_schema.go index 9802806c3a..79ea32f571 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta/membership_schema.go @@ -132,7 +132,7 @@ func DCLMembershipSchema() *dcl.Schema { Type: "string", GoName: "WorkloadIdentityPool", ReadOnly: true, - Description: "Output only. The name of the workload identity pool in which `issuer` will be recognized. 
There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in: {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API.", + Description: "Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API.", }, }, }, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/iam_utils.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/iam_utils.go index 92397ad1bb..7bfe3cf7be 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/iam_utils.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/iam_utils.go @@ -53,6 +53,28 @@ func EncodeServiceAccountCreateRequest(m map[string]interface{}) map[string]inte return EncodeIAMCreateRequest(m, "serviceAccount", "accountId") } +// canonicalizeServiceAccountName compares service account names ignoring the part after @. +func canonicalizeServiceAccountName(m, n interface{}) bool { + mStr, ok := m.(*string) + if !ok { + return false + } + nStr, ok := n.(*string) + if !ok { + return false + } + if mStr == nil && nStr == nil { + return true + } + if mStr == nil || nStr == nil { + return false + } + // Compare values before @. 
+ mVal := strings.Split(*mStr, "@")[0] + nVal := strings.Split(*nStr, "@")[0] + return dcl.PartialSelfLinkToSelfLink(&mVal, &nVal) +} + func (c *Client) GetWorkloadIdentityPool(ctx context.Context, r *WorkloadIdentityPool) (*WorkloadIdentityPool, error) { ctx = dcl.ContextWithRequestID(ctx) ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/role.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/role.go index e3901d9c2b..dd8cb8cc7b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/role.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/role.go @@ -238,9 +238,8 @@ func (c *Client) GetRole(ctx context.Context, r *Role) (*Role, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Parent = nr.Parent - result.Name = nr.Name + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account.go index 8bfe5378de..fc877a7b80 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account.go @@ -247,9 +247,8 @@ func (c *Client) GetServiceAccount(ctx context.Context, r *ServiceAccount) (*Ser if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + 
result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account_internal.go index 87c4531cad..47f78967e2 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/service_account_internal.go @@ -477,7 +477,7 @@ func canonicalizeServiceAccountDesiredState(rawDesired, rawInitial *ServiceAccou return rawDesired, nil } canonicalDesired := &ServiceAccount{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + if canonicalizeServiceAccountName(rawDesired.Name, rawInitial.Name) { canonicalDesired.Name = rawInitial.Name } else { canonicalDesired.Name = rawDesired.Name @@ -508,7 +508,7 @@ func canonicalizeServiceAccountNewState(c *Client, rawNew, rawDesired *ServiceAc if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { rawNew.Name = rawDesired.Name } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + if canonicalizeServiceAccountName(rawDesired.Name, rawNew.Name) { rawNew.Name = rawDesired.Name } } @@ -817,7 +817,7 @@ func diffServiceAccount(c *Client, desired, actual *ServiceAccount, opts ...dcl. var fn dcl.FieldName var newDiffs []*dcl.FieldDiff // New style diffs. 
- if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{CustomDiff: canonicalizeServiceAccountName, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { if err != nil { return nil, err } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool.go index 7ebddba467..05217fdc46 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool.go @@ -181,9 +181,8 @@ func (c *Client) GetWorkforcePool(ctx context.Context, r *WorkforcePool) (*Workf if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Name = nr.Name + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool_provider.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool_provider.go index a7c510447d..854bd945bd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool_provider.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workforce_pool_provider.go @@ -281,10 +281,9 @@ func (c *Client) GetWorkforcePoolProvider(ctx context.Context, r *WorkforcePoolP if err 
!= nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.WorkforcePool = nr.WorkforcePool - result.Name = nr.Name + result.Location = r.Location + result.WorkforcePool = r.WorkforcePool + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workload_identity_pool_provider.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workload_identity_pool_provider.go index 3e876a9f7d..6edd6da5a2 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workload_identity_pool_provider.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam/workload_identity_pool_provider.go @@ -287,11 +287,10 @@ func (c *Client) GetWorkloadIdentityPoolProvider(ctx context.Context, r *Workloa if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.WorkloadIdentityPool = nr.WorkloadIdentityPool - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.WorkloadIdentityPool = r.WorkloadIdentityPool + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand.go index 8697f35e99..fc30f03e91 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand.go @@ -144,9 +144,8 @@ func (c *Client) GetBrand(ctx context.Context, r *Brand) (*Brand, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand_internal.go index 5ac9a9c82d..6bfd01bcf1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/brand_internal.go @@ -159,11 +159,8 @@ func (op *createBrandOperation) do(ctx context.Context, r *Brand, c *Client) err op.response = o // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetBrand(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client.go index 2f9135487f..d5973cc782 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client.go @@ -145,10 +145,9 @@ func (c *Client) GetIdentityAwareProxyClient(ctx context.Context, r *IdentityAwa if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Brand = nr.Brand - result.Name = nr.Name + result.Project = r.Project + result.Brand = r.Brand + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client_internal.go index ee56bfc8bc..8205add915 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client_internal.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iap/identity_aware_proxy_client_internal.go @@ -240,11 +240,8 @@ func (op *createIdentityAwareProxyClientOperation) do(ctx context.Context, r *Id op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetIdentityAwareProxyClient(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/config.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/config.go index 5409a3f055..bc30809af6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/config.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/config.go @@ -1795,8 +1795,7 @@ func (c *Client) GetConfig(ctx context.Context, r *Config) (*Config, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project + result.Project = r.Project c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.go index ad00486371..3430b09a2b 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.go @@ -204,9 +204,8 @@ func (c *Client) GetOAuthIdpConfig(ctx context.Context, r *OAuthIdpConfig) (*OAu if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.yaml index f03c6d2f2e..f68e970a3f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config.yaml @@ -103,9 +103,9 @@ components: type: object x-dcl-go-name: ResponseType x-dcl-go-type: OAuthIdpConfigResponseType - description: 'The multiple response type to request for in the OAuth authorization - flow. This can possibly be a combination of set bits (e.g.: {id\_token, - token}).' + description: The multiple response type to request for in the OAuth authorization + flow. This can possibly be a combination of set bits (e.g. {id\_token, + token}). 
properties: code: type: boolean diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_schema.go index b008d22c62..cf833d410c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_schema.go @@ -141,7 +141,7 @@ func DCLOAuthIdpConfigSchema() *dcl.Schema { Type: "object", GoName: "ResponseType", GoType: "OAuthIdpConfigResponseType", - Description: "The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g.: {id\\_token, token}).", + Description: "The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g. 
{id\\_token, token}).", Properties: map[string]*dcl.Property{ "code": &dcl.Property{ Type: "boolean", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_yaml_embed.go index cea899471d..9554fe3644 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/oauth_idp_config_yaml_embed.go @@ -17,7 +17,7 @@ package identitytoolkit // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/identitytoolkit/oauth_idp_config.yaml -var YAML_oauth_idp_config = []byte("info:\n title: IdentityToolkit/OAuthIdpConfig\n description: The IdentityToolkit OAuthIdpConfig resource\n x-dcl-struct-name: OAuthIdpConfig\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a OAuthIdpConfig\n parameters:\n - name: OAuthIdpConfig\n required: true\n description: A full instance of a OAuthIdpConfig\n apply:\n description: The function used to apply information about a OAuthIdpConfig\n parameters:\n - name: OAuthIdpConfig\n required: true\n description: A full instance of a OAuthIdpConfig\n delete:\n description: The function used to delete a OAuthIdpConfig\n parameters:\n - name: OAuthIdpConfig\n required: true\n description: A full instance of a OAuthIdpConfig\n deleteAll:\n description: The function used to delete all OAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many OAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n OAuthIdpConfig:\n title: 
OAuthIdpConfig\n x-dcl-id: projects/{{project}}/oauthIdpConfigs/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - project\n properties:\n clientId:\n type: string\n x-dcl-go-name: ClientId\n description: The client id of an OAuth client.\n clientSecret:\n type: string\n x-dcl-go-name: ClientSecret\n description: The client secret of the OAuth client, to enable OIDC code\n flow.\n x-dcl-sensitive: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The config's display name set by developers.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: True if allows the user to sign in with the provider.\n issuer:\n type: string\n x-dcl-go-name: Issuer\n description: For OIDC Idps, the issuer identifier.\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the Config resource\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n responseType:\n type: object\n x-dcl-go-name: ResponseType\n x-dcl-go-type: OAuthIdpConfigResponseType\n description: 'The multiple response type to request for in the OAuth authorization\n flow. 
This can possibly be a combination of set bits (e.g.: {id\\_token,\n token}).'\n properties:\n code:\n type: boolean\n x-dcl-go-name: Code\n description: If true, authorization code is returned from IdP's authorization\n endpoint.\n idToken:\n type: boolean\n x-dcl-go-name: IdToken\n description: If true, ID token is returned from IdP's authorization\n endpoint.\n token:\n type: boolean\n x-dcl-go-name: Token\n description: If true, access token is returned from IdP's authorization\n endpoint.\n") +var YAML_oauth_idp_config = []byte("info:\n title: IdentityToolkit/OAuthIdpConfig\n description: The IdentityToolkit OAuthIdpConfig resource\n x-dcl-struct-name: OAuthIdpConfig\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a OAuthIdpConfig\n parameters:\n - name: OAuthIdpConfig\n required: true\n description: A full instance of a OAuthIdpConfig\n apply:\n description: The function used to apply information about a OAuthIdpConfig\n parameters:\n - name: OAuthIdpConfig\n required: true\n description: A full instance of a OAuthIdpConfig\n delete:\n description: The function used to delete a OAuthIdpConfig\n parameters:\n - name: OAuthIdpConfig\n required: true\n description: A full instance of a OAuthIdpConfig\n deleteAll:\n description: The function used to delete all OAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many OAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n OAuthIdpConfig:\n title: OAuthIdpConfig\n x-dcl-id: projects/{{project}}/oauthIdpConfigs/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - project\n properties:\n clientId:\n type: string\n x-dcl-go-name: ClientId\n description: The client id of 
an OAuth client.\n clientSecret:\n type: string\n x-dcl-go-name: ClientSecret\n description: The client secret of the OAuth client, to enable OIDC code\n flow.\n x-dcl-sensitive: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The config's display name set by developers.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: True if allows the user to sign in with the provider.\n issuer:\n type: string\n x-dcl-go-name: Issuer\n description: For OIDC Idps, the issuer identifier.\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the Config resource\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n responseType:\n type: object\n x-dcl-go-name: ResponseType\n x-dcl-go-type: OAuthIdpConfigResponseType\n description: The multiple response type to request for in the OAuth authorization\n flow. This can possibly be a combination of set bits (e.g. 
{id\\_token,\n token}).\n properties:\n code:\n type: boolean\n x-dcl-go-name: Code\n description: If true, authorization code is returned from IdP's authorization\n endpoint.\n idToken:\n type: boolean\n x-dcl-go-name: IdToken\n description: If true, ID token is returned from IdP's authorization\n endpoint.\n token:\n type: boolean\n x-dcl-go-name: Token\n description: If true, access token is returned from IdP's authorization\n endpoint.\n") -// 3640 bytes -// MD5: c7f5a027cf3b3de1eb3722cc9025fe6c +// 3637 bytes +// MD5: cebd9ced36e76fde1c6e72b26719b90a diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant.go index bf5b126d33..5ebb749240 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant.go @@ -257,9 +257,8 @@ func (c *Client) GetTenant(ctx context.Context, r *Tenant) (*Tenant, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_internal.go index 2e6b325ffd..47f2b47124 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_internal.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_internal.go @@ -322,11 +322,8 @@ func (op *createTenantOperation) do(ctx context.Context, r *Tenant, c *Client) e op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetTenant(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.go index 0c65570d60..c5698ab83e 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.go @@ -207,10 +207,9 @@ func (c *Client) GetTenantOAuthIdpConfig(ctx context.Context, r *TenantOAuthIdpC if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Tenant = nr.Tenant - result.Name = nr.Name + result.Project = r.Project + result.Tenant = r.Tenant + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.yaml 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.yaml index 9e09abe8f3..d84c60e970 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config.yaml @@ -112,9 +112,9 @@ components: type: object x-dcl-go-name: ResponseType x-dcl-go-type: TenantOAuthIdpConfigResponseType - description: 'The multiple response type to request for in the OAuth authorization - flow. This can possibly be a combination of set bits (e.g.: {id\_token, - token}).' + description: The multiple response type to request for in the OAuth authorization + flow. This can possibly be a combination of set bits (e.g. {id\_token, + token}). properties: code: type: boolean diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_schema.go index 9d74cf6db5..66760bcc29 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_schema.go @@ -156,7 +156,7 @@ func DCLTenantOAuthIdpConfigSchema() *dcl.Schema { Type: "object", GoName: "ResponseType", GoType: "TenantOAuthIdpConfigResponseType", - Description: "The multiple response type to request for in the OAuth authorization flow. This can possibly be a combination of set bits (e.g.: {id\\_token, token}).", + Description: "The multiple response type to request for in the OAuth authorization flow. 
This can possibly be a combination of set bits (e.g. {id\\_token, token}).", Properties: map[string]*dcl.Property{ "code": &dcl.Property{ Type: "boolean", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_yaml_embed.go index 28fd0608fe..7c7692d599 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/identitytoolkit/tenant_oauth_idp_config_yaml_embed.go @@ -17,7 +17,7 @@ package identitytoolkit // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/identitytoolkit/tenant_oauth_idp_config.yaml -var YAML_tenant_oauth_idp_config = []byte("info:\n title: IdentityToolkit/TenantOAuthIdpConfig\n description: The IdentityToolkit TenantOAuthIdpConfig resource\n x-dcl-struct-name: TenantOAuthIdpConfig\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a TenantOAuthIdpConfig\n parameters:\n - name: TenantOAuthIdpConfig\n required: true\n description: A full instance of a TenantOAuthIdpConfig\n apply:\n description: The function used to apply information about a TenantOAuthIdpConfig\n parameters:\n - name: TenantOAuthIdpConfig\n required: true\n description: A full instance of a TenantOAuthIdpConfig\n delete:\n description: The function used to delete a TenantOAuthIdpConfig\n parameters:\n - name: TenantOAuthIdpConfig\n required: true\n description: A full instance of a TenantOAuthIdpConfig\n deleteAll:\n description: The function used to delete all TenantOAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: tenant\n required: true\n schema:\n 
type: string\n list:\n description: The function used to list information about many TenantOAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: tenant\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n TenantOAuthIdpConfig:\n title: TenantOAuthIdpConfig\n x-dcl-id: projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - project\n - tenant\n properties:\n clientId:\n type: string\n x-dcl-go-name: ClientId\n description: The client id of an OAuth client.\n clientSecret:\n type: string\n x-dcl-go-name: ClientSecret\n description: The client secret of the OAuth client, to enable OIDC code\n flow.\n x-dcl-sensitive: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The config's display name set by developers.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: True if allows the user to sign in with the provider.\n issuer:\n type: string\n x-dcl-go-name: Issuer\n description: For OIDC Idps, the issuer identifier.\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the Config resource\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n responseType:\n type: object\n x-dcl-go-name: ResponseType\n x-dcl-go-type: TenantOAuthIdpConfigResponseType\n description: 'The multiple response type to request for in the OAuth authorization\n flow. 
This can possibly be a combination of set bits (e.g.: {id\\_token,\n token}).'\n properties:\n code:\n type: boolean\n x-dcl-go-name: Code\n description: If true, authorization code is returned from IdP's authorization\n endpoint.\n idToken:\n type: boolean\n x-dcl-go-name: IdToken\n description: If true, ID token is returned from IdP's authorization\n endpoint.\n token:\n type: boolean\n x-dcl-go-name: Token\n description: If true, access token is returned from IdP's authorization\n endpoint.\n tenant:\n type: string\n x-dcl-go-name: Tenant\n description: The tenant for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Identitytoolkit/Tenant\n field: name\n parent: true\n") +var YAML_tenant_oauth_idp_config = []byte("info:\n title: IdentityToolkit/TenantOAuthIdpConfig\n description: The IdentityToolkit TenantOAuthIdpConfig resource\n x-dcl-struct-name: TenantOAuthIdpConfig\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a TenantOAuthIdpConfig\n parameters:\n - name: TenantOAuthIdpConfig\n required: true\n description: A full instance of a TenantOAuthIdpConfig\n apply:\n description: The function used to apply information about a TenantOAuthIdpConfig\n parameters:\n - name: TenantOAuthIdpConfig\n required: true\n description: A full instance of a TenantOAuthIdpConfig\n delete:\n description: The function used to delete a TenantOAuthIdpConfig\n parameters:\n - name: TenantOAuthIdpConfig\n required: true\n description: A full instance of a TenantOAuthIdpConfig\n deleteAll:\n description: The function used to delete all TenantOAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: tenant\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many TenantOAuthIdpConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: tenant\n required: true\n schema:\n type: 
string\ncomponents:\n schemas:\n TenantOAuthIdpConfig:\n title: TenantOAuthIdpConfig\n x-dcl-id: projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - project\n - tenant\n properties:\n clientId:\n type: string\n x-dcl-go-name: ClientId\n description: The client id of an OAuth client.\n clientSecret:\n type: string\n x-dcl-go-name: ClientSecret\n description: The client secret of the OAuth client, to enable OIDC code\n flow.\n x-dcl-sensitive: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The config's display name set by developers.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: True if allows the user to sign in with the provider.\n issuer:\n type: string\n x-dcl-go-name: Issuer\n description: For OIDC Idps, the issuer identifier.\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of the Config resource\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n responseType:\n type: object\n x-dcl-go-name: ResponseType\n x-dcl-go-type: TenantOAuthIdpConfigResponseType\n description: The multiple response type to request for in the OAuth authorization\n flow. This can possibly be a combination of set bits (e.g. 
{id\\_token,\n token}).\n properties:\n code:\n type: boolean\n x-dcl-go-name: Code\n description: If true, authorization code is returned from IdP's authorization\n endpoint.\n idToken:\n type: boolean\n x-dcl-go-name: IdToken\n description: If true, ID token is returned from IdP's authorization\n endpoint.\n token:\n type: boolean\n x-dcl-go-name: Token\n description: If true, access token is returned from IdP's authorization\n endpoint.\n tenant:\n type: string\n x-dcl-go-name: Tenant\n description: The tenant for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Identitytoolkit/Tenant\n field: name\n parent: true\n") -// 4209 bytes -// MD5: 84dddee2bf3fc305d724638e6a6ef370 +// 4206 bytes +// MD5: d0010eff9ca306f1b08f7bbeca80f99e diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_bucket.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_bucket.go index cb21b894eb..c25e0f79c8 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_bucket.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_bucket.go @@ -180,10 +180,9 @@ func (c *Client) GetLogBucket(ctx context.Context, r *LogBucket) (*LogBucket, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_exclusion.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_exclusion.go index 493d6f340e..480f742150 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_exclusion.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_exclusion.go @@ -148,9 +148,8 @@ func (c *Client) GetLogExclusion(ctx context.Context, r *LogExclusion) (*LogExcl if err != nil { return nil, err } - nr := r.urlNormalized() - result.Parent = nr.Parent - result.Name = nr.Name + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.go index a05f9b500d..7e04ac670b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.go @@ -645,9 +645,8 @@ func (c *Client) GetLogMetric(ctx context.Context, r *LogMetric) (*LogMetric, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.yaml 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.yaml index 96c5785e9b..75f85c6ba4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric.yaml @@ -366,7 +366,7 @@ components: should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a - unit is as follows: Expression = Component: { "." Component } { "/" + unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_beta_yaml_embed.go index 86caba0a2e..e627ac2712 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/beta/log_metric.yaml -var YAML_log_metric = []byte("info:\n title: Logging/LogMetric\n description: The Logging LogMetric resource\n x-dcl-struct-name: LogMetric\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogMetric\n parameters:\n - name: LogMetric\n 
required: true\n description: A full instance of a LogMetric\n apply:\n description: The function used to apply information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n delete:\n description: The function used to delete a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n deleteAll:\n description: The function used to delete all LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogMetric:\n title: LogMetric\n x-dcl-id: projects/{{project}}/metrics/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - filter\n - project\n properties:\n bucketOptions:\n type: object\n x-dcl-go-name: BucketOptions\n x-dcl-go-type: LogMetricBucketOptions\n description: Optional. 
The `bucket_options` are required when the logs-based\n metric is using a DISTRIBUTION value type and it describes the bucket\n boundaries used to create a histogram of the extracted values.\n properties:\n explicitBuckets:\n type: object\n x-dcl-go-name: ExplicitBuckets\n x-dcl-go-type: LogMetricBucketOptionsExplicitBuckets\n description: The explicit buckets.\n x-dcl-conflicts:\n - linearBuckets\n - exponentialBuckets\n properties:\n bounds:\n type: array\n x-dcl-go-name: Bounds\n description: The values must be monotonically increasing.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: number\n format: double\n x-dcl-go-type: float64\n exponentialBuckets:\n type: object\n x-dcl-go-name: ExponentialBuckets\n x-dcl-go-type: LogMetricBucketOptionsExponentialBuckets\n description: The exponential buckets.\n x-dcl-conflicts:\n - linearBuckets\n - explicitBuckets\n properties:\n growthFactor:\n type: number\n format: double\n x-dcl-go-name: GrowthFactor\n description: Must be greater than 1.\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n scale:\n type: number\n format: double\n x-dcl-go-name: Scale\n description: Must be greater than 0.\n linearBuckets:\n type: object\n x-dcl-go-name: LinearBuckets\n x-dcl-go-type: LogMetricBucketOptionsLinearBuckets\n description: The linear bucket.\n x-dcl-conflicts:\n - exponentialBuckets\n - explicitBuckets\n properties:\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n offset:\n type: number\n format: double\n x-dcl-go-name: Offset\n description: Lower bound of the first bucket.\n width:\n type: number\n format: double\n x-dcl-go-name: Width\n description: Must be greater than 0.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the metric. 
This field\n may not be present for older metrics.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A description of this metric, which is used in documentation.\n The maximum length of the description is 8000 characters.\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Optional. If set to True, then this metric is disabled and\n it does not generate any points.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Required. An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters)\n which is used to match log entries. Example: \"resource.type=gae_app AND\n severity>=ERROR\" The maximum length of the filter is 20000 characters.'\n labelExtractors:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: LabelExtractors\n description: Optional. A map from a label key string to an extractor expression\n which is used to extract data from a log entry field and assign as the\n label value. Each label key specified in the LabelDescriptor must have\n an associated extractor expression in this map. The syntax of the extractor\n expression is the same as for the `value_extractor` field. The extracted\n value is converted to the type defined in the label descriptor. If the\n either the extraction or the type conversion fails, the label will have\n a default value. The default value for a string label is an empty string,\n for an integer label its 0, and for a boolean label its `false`. Note\n that there are upper bounds on the maximum number of labels and the number\n of active time series that are allowed in a project.\n metricDescriptor:\n type: object\n x-dcl-go-name: MetricDescriptor\n x-dcl-go-type: LogMetricMetricDescriptor\n description: Optional. The metric descriptor associated with the logs-based\n metric. 
If unspecified, it uses a default metric descriptor with a DELTA\n metric kind, INT64 value type, with no labels and a unit of \"1\". Such\n a metric counts the number of log entries matching the `filter` expression.\n The `name`, `type`, and `description` fields in the `metric_descriptor`\n are output only, and is constructed using the `name` and `description`\n field in the LogMetric. To create a logs-based metric that records a distribution\n of log values, a DELTA metric kind with a DISTRIBUTION value type must\n be used along with a `value_extractor` expression in the LogMetric. Each\n label in the metric descriptor must have a matching label name as the\n key and an extractor expression as the value in the `label_extractors`\n map. The `metric_kind` and `value_type` fields in the `metric_descriptor`\n cannot be updated once initially configured. New labels can be added in\n the `metric_descriptor`, but existing labels cannot be modified except\n for their description.\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n readOnly: true\n description: A detailed description of the metric, which can be used\n in documentation.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in\n user interfaces. Use sentence case without an ending period, for example\n \"Request count\". This field is optional but it is recommended to be\n set for any metrics associated with user-visible concepts, such as\n Quota.\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific\n instance of this metric type. 
For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`,\n so you can look at latencies for successful responses or just for\n responses that failed.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: LogMetricMetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: The label key.\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: LogMetricMetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. Possible\n values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n enum:\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: LogMetricMetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of\n the metric.\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data\n points older than this age are guaranteed to be ingested and available\n to be read, excluding data loss due to errors.\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. 
For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: LogMetricMetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes\n to a value, etc. Some combinations of `metric_kind` and `value_type`\n might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that\n is associated with this metric type can only be associated with one\n of the monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n readOnly: true\n description: 'The metric type, including its DNS name prefix. The type\n is not URL-encoded. All user-defined metric types have the DNS name\n `custom.googleapis.com` or `external.googleapis.com`. Metric types\n should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. 
It is\n only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values.\n Different systems might scale the values to be more easily displayed\n (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value\n of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit`\n is `kBy`, then the value of the metric is always in thousands of bytes,\n no matter how it might be displayed. If you want a custom metric to\n record the exact number of CPU-seconds used by a job, you can create\n an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently\n `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the\n value is written as `12005`. Alternatively, if you want a custom metric\n to record data in a more granular way, you can create a `DOUBLE CUMULATIVE`\n metric whose `unit` is `ks{CPU}`, and then write the value `12.005`\n (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which\n is `12005/1024`). The supported units are a subset of [The Unified\n Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard:\n **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min`\n minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta\n (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9)\n * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z`\n zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi\n (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar**\n The grammar also includes these connectors: * `/` division or ratio\n (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms`\n (although you should almost never have `/s` in a metric `unit`; rates\n should always be computed at query time from the underlying cumulative\n or delta value). * `.` multiplication or composition (as an infix\n operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a\n unit is as follows: Expression = Component: { \".\" Component } { \"/\"\n Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used\n alone, then the unit is equivalent to `1`. For examples, `{request}/s\n == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank\n printable ASCII characters not containing `{` or `}`. * `1` represents\n a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic\n units are appropriate. For example, \"new users per day\" can be represented\n as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5\n new users). Alternatively, \"thousands of page views per day\" would\n be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\"). * `%` represents\n dimensionless value of 1/100, and annotates values giving a percentage\n (so the metric values are typically in the range of 0..100, and a\n metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric\n contains a ratio, typically in the range 0..1, that will be multiplied\n by 100 and displayed as a percentage (so a metric value `0.03` means\n \"3 percent\").'\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point\n number, etc. 
Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. The client-assigned metric identifier. Examples:\n `\"error_count\"`, `\"nginx/requests\"`. Metric identifiers are limited to\n 100 characters and can include only the following characters: `A-Z`, `a-z`,\n `0-9`, and the special characters `_-.,+!*'',()%/`. The forward-slash\n character (`/`) denotes a hierarchy of name pieces, and it cannot be the\n first character of the name. The metric identifier in this field must\n not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).\n However, when the metric identifier appears as the `[METRIC_ID]` part\n of a `metric_name` API parameter, then the metric identifier must be URL-encoded.\n Example: `\"projects/my-project/metrics/nginx%2Frequests\"`.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The resource name of the project in which to create the metric.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the metric. This\n field may not be present for older metrics.\n x-kubernetes-immutable: true\n valueExtractor:\n type: string\n x-dcl-go-name: ValueExtractor\n description: 'Optional. A `value_extractor` is required when using a distribution\n logs-based metric to extract the values to record from a log entry. Two\n functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field,\n regex)`. The argument are: 1. field: The name of the log entry field from\n which the value is to be extracted. 2. 
regex: A regular expression using\n the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with\n a single capture group to extract data from the specified log entry field.\n The value of the field is converted to a string before applying the regex.\n It is an error to specify a regex that does not include exactly one capture\n group. The result of the extraction must be convertible to a double type,\n as the distribution always records double values. If either the extraction\n or the conversion to double fails, then those values are not recorded\n in the distribution. Example: `REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(d+).*\")`'\n") +var YAML_log_metric = []byte("info:\n title: Logging/LogMetric\n description: The Logging LogMetric resource\n x-dcl-struct-name: LogMetric\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n apply:\n description: The function used to apply information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n delete:\n description: The function used to delete a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n deleteAll:\n description: The function used to delete all LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogMetric:\n title: LogMetric\n x-dcl-id: projects/{{project}}/metrics/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - filter\n - 
project\n properties:\n bucketOptions:\n type: object\n x-dcl-go-name: BucketOptions\n x-dcl-go-type: LogMetricBucketOptions\n description: Optional. The `bucket_options` are required when the logs-based\n metric is using a DISTRIBUTION value type and it describes the bucket\n boundaries used to create a histogram of the extracted values.\n properties:\n explicitBuckets:\n type: object\n x-dcl-go-name: ExplicitBuckets\n x-dcl-go-type: LogMetricBucketOptionsExplicitBuckets\n description: The explicit buckets.\n x-dcl-conflicts:\n - linearBuckets\n - exponentialBuckets\n properties:\n bounds:\n type: array\n x-dcl-go-name: Bounds\n description: The values must be monotonically increasing.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: number\n format: double\n x-dcl-go-type: float64\n exponentialBuckets:\n type: object\n x-dcl-go-name: ExponentialBuckets\n x-dcl-go-type: LogMetricBucketOptionsExponentialBuckets\n description: The exponential buckets.\n x-dcl-conflicts:\n - linearBuckets\n - explicitBuckets\n properties:\n growthFactor:\n type: number\n format: double\n x-dcl-go-name: GrowthFactor\n description: Must be greater than 1.\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n scale:\n type: number\n format: double\n x-dcl-go-name: Scale\n description: Must be greater than 0.\n linearBuckets:\n type: object\n x-dcl-go-name: LinearBuckets\n x-dcl-go-type: LogMetricBucketOptionsLinearBuckets\n description: The linear bucket.\n x-dcl-conflicts:\n - exponentialBuckets\n - explicitBuckets\n properties:\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n offset:\n type: number\n format: double\n x-dcl-go-name: Offset\n description: Lower bound of the first bucket.\n width:\n type: number\n format: double\n x-dcl-go-name: Width\n description: Must be greater than 0.\n createTime:\n type: string\n 
format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the metric. This field\n may not be present for older metrics.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A description of this metric, which is used in documentation.\n The maximum length of the description is 8000 characters.\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Optional. If set to True, then this metric is disabled and\n it does not generate any points.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Required. An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters)\n which is used to match log entries. Example: \"resource.type=gae_app AND\n severity>=ERROR\" The maximum length of the filter is 20000 characters.'\n labelExtractors:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: LabelExtractors\n description: Optional. A map from a label key string to an extractor expression\n which is used to extract data from a log entry field and assign as the\n label value. Each label key specified in the LabelDescriptor must have\n an associated extractor expression in this map. The syntax of the extractor\n expression is the same as for the `value_extractor` field. The extracted\n value is converted to the type defined in the label descriptor. If the\n either the extraction or the type conversion fails, the label will have\n a default value. The default value for a string label is an empty string,\n for an integer label its 0, and for a boolean label its `false`. Note\n that there are upper bounds on the maximum number of labels and the number\n of active time series that are allowed in a project.\n metricDescriptor:\n type: object\n x-dcl-go-name: MetricDescriptor\n x-dcl-go-type: LogMetricMetricDescriptor\n description: Optional. 
The metric descriptor associated with the logs-based\n metric. If unspecified, it uses a default metric descriptor with a DELTA\n metric kind, INT64 value type, with no labels and a unit of \"1\". Such\n a metric counts the number of log entries matching the `filter` expression.\n The `name`, `type`, and `description` fields in the `metric_descriptor`\n are output only, and is constructed using the `name` and `description`\n field in the LogMetric. To create a logs-based metric that records a distribution\n of log values, a DELTA metric kind with a DISTRIBUTION value type must\n be used along with a `value_extractor` expression in the LogMetric. Each\n label in the metric descriptor must have a matching label name as the\n key and an extractor expression as the value in the `label_extractors`\n map. The `metric_kind` and `value_type` fields in the `metric_descriptor`\n cannot be updated once initially configured. New labels can be added in\n the `metric_descriptor`, but existing labels cannot be modified except\n for their description.\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n readOnly: true\n description: A detailed description of the metric, which can be used\n in documentation.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in\n user interfaces. Use sentence case without an ending period, for example\n \"Request count\". This field is optional but it is recommended to be\n set for any metrics associated with user-visible concepts, such as\n Quota.\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific\n instance of this metric type. 
For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`,\n so you can look at latencies for successful responses or just for\n responses that failed.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: LogMetricMetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: The label key.\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: LogMetricMetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. Possible\n values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n enum:\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: LogMetricMetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of\n the metric.\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data\n points older than this age are guaranteed to be ingested and available\n to be read, excluding data loss due to errors.\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. 
For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: LogMetricMetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes\n to a value, etc. Some combinations of `metric_kind` and `value_type`\n might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that\n is associated with this metric type can only be associated with one\n of the monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n readOnly: true\n description: 'The metric type, including its DNS name prefix. The type\n is not URL-encoded. All user-defined metric types have the DNS name\n `custom.googleapis.com` or `external.googleapis.com`. Metric types\n should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. 
It is\n only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values.\n Different systems might scale the values to be more easily displayed\n (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value\n of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit`\n is `kBy`, then the value of the metric is always in thousands of bytes,\n no matter how it might be displayed. If you want a custom metric to\n record the exact number of CPU-seconds used by a job, you can create\n an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently\n `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the\n value is written as `12005`. Alternatively, if you want a custom metric\n to record data in a more granular way, you can create a `DOUBLE CUMULATIVE`\n metric whose `unit` is `ks{CPU}`, and then write the value `12.005`\n (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which\n is `12005/1024`). The supported units are a subset of [The Unified\n Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard:\n **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min`\n minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta\n (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9)\n * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z`\n zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi\n (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar**\n The grammar also includes these connectors: * `/` division or ratio\n (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms`\n (although you should almost never have `/s` in a metric `unit`; rates\n should always be computed at query time from the underlying cumulative\n or delta value). * `.` multiplication or composition (as an infix\n operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a\n unit is as follows: Expression = Component { \".\" Component } { \"/\"\n Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used\n alone, then the unit is equivalent to `1`. For examples, `{request}/s\n == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank\n printable ASCII characters not containing `{` or `}`. * `1` represents\n a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic\n units are appropriate. For example, \"new users per day\" can be represented\n as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5\n new users). Alternatively, \"thousands of page views per day\" would\n be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\"). * `%` represents\n dimensionless value of 1/100, and annotates values giving a percentage\n (so the metric values are typically in the range of 0..100, and a\n metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric\n contains a ratio, typically in the range 0..1, that will be multiplied\n by 100 and displayed as a percentage (so a metric value `0.03` means\n \"3 percent\").'\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point\n number, etc. 
Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. The client-assigned metric identifier. Examples:\n `\"error_count\"`, `\"nginx/requests\"`. Metric identifiers are limited to\n 100 characters and can include only the following characters: `A-Z`, `a-z`,\n `0-9`, and the special characters `_-.,+!*'',()%/`. The forward-slash\n character (`/`) denotes a hierarchy of name pieces, and it cannot be the\n first character of the name. The metric identifier in this field must\n not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).\n However, when the metric identifier appears as the `[METRIC_ID]` part\n of a `metric_name` API parameter, then the metric identifier must be URL-encoded.\n Example: `\"projects/my-project/metrics/nginx%2Frequests\"`.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The resource name of the project in which to create the metric.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the metric. This\n field may not be present for older metrics.\n x-kubernetes-immutable: true\n valueExtractor:\n type: string\n x-dcl-go-name: ValueExtractor\n description: 'Optional. A `value_extractor` is required when using a distribution\n logs-based metric to extract the values to record from a log entry. Two\n functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field,\n regex)`. The argument are: 1. field: The name of the log entry field from\n which the value is to be extracted. 2. 
regex: A regular expression using\n the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with\n a single capture group to extract data from the specified log entry field.\n The value of the field is converted to a string before applying the regex.\n It is an error to specify a regex that does not include exactly one capture\n group. The result of the extraction must be convertible to a double type,\n as the distribution always records double values. If either the extraction\n or the conversion to double fails, then those values are not recorded\n in the distribution. Example: `REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(d+).*\")`'\n") -// 22059 bytes -// MD5: c3404550568b8db8f3aa1969ac4367e5 +// 22058 bytes +// MD5: a2e241a0cd3ea262a661d622bdb18e0d diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_schema.go index 7912b5eeb7..2a576ab1d4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_metric_schema.go @@ -352,7 +352,7 @@ func DCLLogMetricSchema() *dcl.Schema { "unit": &dcl.Property{ Type: "string", GoName: "Unit", - Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. 
If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. 
For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", + Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. 
Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. 
It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", }, "valueType": &dcl.Property{ Type: "string", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_view.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_view.go index 838d02faf7..bf1ec0865a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_view.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/beta/log_view.go @@ -152,11 +152,10 @@ func (c *Client) GetLogView(ctx context.Context, r *LogView) (*LogView, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Bucket = nr.Bucket - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Bucket = r.Bucket + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go index bd80b78960..30375e5901 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go @@ -180,10 +180,9 @@ func (c *Client) GetLogBucket(ctx context.Context, r *LogBucket) (*LogBucket, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go index b4ea6200ce..30d6855790 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go @@ -148,9 +148,8 @@ func (c *Client) GetLogExclusion(ctx context.Context, r *LogExclusion) (*LogExcl if err != nil { return nil, err } - nr := r.urlNormalized() - result.Parent = nr.Parent - result.Name = nr.Name + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go index f4701c460a..f8f2653253 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go @@ -645,9 +645,8 @@ func (c *Client) GetLogMetric(ctx context.Context, r *LogMetric) (*LogMetric, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml index 96c5785e9b..75f85c6ba4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml @@ -366,7 +366,7 @@ components: should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a - unit is as follows: Expression = Component: { "." Component } { "/" + unit is as follows: Expression = Component { "." 
Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go index cca7f16504..9b6005d92f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go @@ -352,7 +352,7 @@ func DCLLogMetricSchema() *dcl.Schema { "unit": &dcl.Property{ Type: "string", GoName: "Unit", - Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). 
The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). 
Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", + Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). 
The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). 
Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", }, "valueType": &dcl.Property{ Type: "string", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go index 1fb57652ac..5cfaf8a4d7 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go @@ -17,7 +17,7 @@ package logging // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_metric.yaml -var YAML_log_metric = []byte("info:\n title: Logging/LogMetric\n description: The Logging LogMetric resource\n x-dcl-struct-name: LogMetric\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n apply:\n description: The function used to apply information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n delete:\n description: The function used to delete a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n deleteAll:\n 
description: The function used to delete all LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogMetric:\n title: LogMetric\n x-dcl-id: projects/{{project}}/metrics/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - filter\n - project\n properties:\n bucketOptions:\n type: object\n x-dcl-go-name: BucketOptions\n x-dcl-go-type: LogMetricBucketOptions\n description: Optional. The `bucket_options` are required when the logs-based\n metric is using a DISTRIBUTION value type and it describes the bucket\n boundaries used to create a histogram of the extracted values.\n properties:\n explicitBuckets:\n type: object\n x-dcl-go-name: ExplicitBuckets\n x-dcl-go-type: LogMetricBucketOptionsExplicitBuckets\n description: The explicit buckets.\n x-dcl-conflicts:\n - linearBuckets\n - exponentialBuckets\n properties:\n bounds:\n type: array\n x-dcl-go-name: Bounds\n description: The values must be monotonically increasing.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: number\n format: double\n x-dcl-go-type: float64\n exponentialBuckets:\n type: object\n x-dcl-go-name: ExponentialBuckets\n x-dcl-go-type: LogMetricBucketOptionsExponentialBuckets\n description: The exponential buckets.\n x-dcl-conflicts:\n - linearBuckets\n - explicitBuckets\n properties:\n growthFactor:\n type: number\n format: double\n x-dcl-go-name: GrowthFactor\n description: Must be greater than 1.\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n scale:\n type: number\n format: double\n x-dcl-go-name: Scale\n 
description: Must be greater than 0.\n linearBuckets:\n type: object\n x-dcl-go-name: LinearBuckets\n x-dcl-go-type: LogMetricBucketOptionsLinearBuckets\n description: The linear bucket.\n x-dcl-conflicts:\n - exponentialBuckets\n - explicitBuckets\n properties:\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n offset:\n type: number\n format: double\n x-dcl-go-name: Offset\n description: Lower bound of the first bucket.\n width:\n type: number\n format: double\n x-dcl-go-name: Width\n description: Must be greater than 0.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the metric. This field\n may not be present for older metrics.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A description of this metric, which is used in documentation.\n The maximum length of the description is 8000 characters.\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Optional. If set to True, then this metric is disabled and\n it does not generate any points.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Required. An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters)\n which is used to match log entries. Example: \"resource.type=gae_app AND\n severity>=ERROR\" The maximum length of the filter is 20000 characters.'\n labelExtractors:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: LabelExtractors\n description: Optional. A map from a label key string to an extractor expression\n which is used to extract data from a log entry field and assign as the\n label value. Each label key specified in the LabelDescriptor must have\n an associated extractor expression in this map. 
The syntax of the extractor\n expression is the same as for the `value_extractor` field. The extracted\n value is converted to the type defined in the label descriptor. If the\n either the extraction or the type conversion fails, the label will have\n a default value. The default value for a string label is an empty string,\n for an integer label its 0, and for a boolean label its `false`. Note\n that there are upper bounds on the maximum number of labels and the number\n of active time series that are allowed in a project.\n metricDescriptor:\n type: object\n x-dcl-go-name: MetricDescriptor\n x-dcl-go-type: LogMetricMetricDescriptor\n description: Optional. The metric descriptor associated with the logs-based\n metric. If unspecified, it uses a default metric descriptor with a DELTA\n metric kind, INT64 value type, with no labels and a unit of \"1\". Such\n a metric counts the number of log entries matching the `filter` expression.\n The `name`, `type`, and `description` fields in the `metric_descriptor`\n are output only, and is constructed using the `name` and `description`\n field in the LogMetric. To create a logs-based metric that records a distribution\n of log values, a DELTA metric kind with a DISTRIBUTION value type must\n be used along with a `value_extractor` expression in the LogMetric. Each\n label in the metric descriptor must have a matching label name as the\n key and an extractor expression as the value in the `label_extractors`\n map. The `metric_kind` and `value_type` fields in the `metric_descriptor`\n cannot be updated once initially configured. 
New labels can be added in\n the `metric_descriptor`, but existing labels cannot be modified except\n for their description.\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n readOnly: true\n description: A detailed description of the metric, which can be used\n in documentation.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in\n user interfaces. Use sentence case without an ending period, for example\n \"Request count\". This field is optional but it is recommended to be\n set for any metrics associated with user-visible concepts, such as\n Quota.\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific\n instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`,\n so you can look at latencies for successful responses or just for\n responses that failed.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: LogMetricMetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: The label key.\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: LogMetricMetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. 
Possible\n values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n enum:\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: LogMetricMetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of\n the metric.\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data\n points older than this age are guaranteed to be ingested and available\n to be read, excluding data loss due to errors.\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: LogMetricMetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes\n to a value, etc. Some combinations of `metric_kind` and `value_type`\n might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. 
If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that\n is associated with this metric type can only be associated with one\n of the monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n readOnly: true\n description: 'The metric type, including its DNS name prefix. The type\n is not URL-encoded. All user-defined metric types have the DNS name\n `custom.googleapis.com` or `external.googleapis.com`. Metric types\n should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. It is\n only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values.\n Different systems might scale the values to be more easily displayed\n (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value\n of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit`\n is `kBy`, then the value of the metric is always in thousands of bytes,\n no matter how it might be displayed. If you want a custom metric to\n record the exact number of CPU-seconds used by a job, you can create\n an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently\n `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the\n value is written as `12005`. 
Alternatively, if you want a custom metric\n to record data in a more granular way, you can create a `DOUBLE CUMULATIVE`\n metric whose `unit` is `ks{CPU}`, and then write the value `12.005`\n (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which\n is `12005/1024`). The supported units are a subset of [The Unified\n Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard:\n **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min`\n minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta\n (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9)\n * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z`\n zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi\n (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar**\n The grammar also includes these connectors: * `/` division or ratio\n (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms`\n (although you should almost never have `/s` in a metric `unit`; rates\n should always be computed at query time from the underlying cumulative\n or delta value). * `.` multiplication or composition (as an infix\n operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a\n unit is as follows: Expression = Component: { \".\" Component } { \"/\"\n Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used\n alone, then the unit is equivalent to `1`. For examples, `{request}/s\n == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank\n printable ASCII characters not containing `{` or `}`. 
* `1` represents\n a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic\n units are appropriate. For example, \"new users per day\" can be represented\n as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5\n new users). Alternatively, \"thousands of page views per day\" would\n be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\"). * `%` represents\n dimensionless value of 1/100, and annotates values giving a percentage\n (so the metric values are typically in the range of 0..100, and a\n metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric\n contains a ratio, typically in the range 0..1, that will be multiplied\n by 100 and displayed as a percentage (so a metric value `0.03` means\n \"3 percent\").'\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point\n number, etc. Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. The client-assigned metric identifier. Examples:\n `\"error_count\"`, `\"nginx/requests\"`. Metric identifiers are limited to\n 100 characters and can include only the following characters: `A-Z`, `a-z`,\n `0-9`, and the special characters `_-.,+!*'',()%/`. The forward-slash\n character (`/`) denotes a hierarchy of name pieces, and it cannot be the\n first character of the name. 
The metric identifier in this field must\n not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).\n However, when the metric identifier appears as the `[METRIC_ID]` part\n of a `metric_name` API parameter, then the metric identifier must be URL-encoded.\n Example: `\"projects/my-project/metrics/nginx%2Frequests\"`.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The resource name of the project in which to create the metric.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the metric. This\n field may not be present for older metrics.\n x-kubernetes-immutable: true\n valueExtractor:\n type: string\n x-dcl-go-name: ValueExtractor\n description: 'Optional. A `value_extractor` is required when using a distribution\n logs-based metric to extract the values to record from a log entry. Two\n functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field,\n regex)`. The argument are: 1. field: The name of the log entry field from\n which the value is to be extracted. 2. regex: A regular expression using\n the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with\n a single capture group to extract data from the specified log entry field.\n The value of the field is converted to a string before applying the regex.\n It is an error to specify a regex that does not include exactly one capture\n group. The result of the extraction must be convertible to a double type,\n as the distribution always records double values. If either the extraction\n or the conversion to double fails, then those values are not recorded\n in the distribution. 
Example: `REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(d+).*\")`'\n") +var YAML_log_metric = []byte("info:\n title: Logging/LogMetric\n description: The Logging LogMetric resource\n x-dcl-struct-name: LogMetric\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n apply:\n description: The function used to apply information about a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n delete:\n description: The function used to delete a LogMetric\n parameters:\n - name: LogMetric\n required: true\n description: A full instance of a LogMetric\n deleteAll:\n description: The function used to delete all LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogMetric:\n title: LogMetric\n x-dcl-id: projects/{{project}}/metrics/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - filter\n - project\n properties:\n bucketOptions:\n type: object\n x-dcl-go-name: BucketOptions\n x-dcl-go-type: LogMetricBucketOptions\n description: Optional. 
The `bucket_options` are required when the logs-based\n metric is using a DISTRIBUTION value type and it describes the bucket\n boundaries used to create a histogram of the extracted values.\n properties:\n explicitBuckets:\n type: object\n x-dcl-go-name: ExplicitBuckets\n x-dcl-go-type: LogMetricBucketOptionsExplicitBuckets\n description: The explicit buckets.\n x-dcl-conflicts:\n - linearBuckets\n - exponentialBuckets\n properties:\n bounds:\n type: array\n x-dcl-go-name: Bounds\n description: The values must be monotonically increasing.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: number\n format: double\n x-dcl-go-type: float64\n exponentialBuckets:\n type: object\n x-dcl-go-name: ExponentialBuckets\n x-dcl-go-type: LogMetricBucketOptionsExponentialBuckets\n description: The exponential buckets.\n x-dcl-conflicts:\n - linearBuckets\n - explicitBuckets\n properties:\n growthFactor:\n type: number\n format: double\n x-dcl-go-name: GrowthFactor\n description: Must be greater than 1.\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n scale:\n type: number\n format: double\n x-dcl-go-name: Scale\n description: Must be greater than 0.\n linearBuckets:\n type: object\n x-dcl-go-name: LinearBuckets\n x-dcl-go-type: LogMetricBucketOptionsLinearBuckets\n description: The linear bucket.\n x-dcl-conflicts:\n - exponentialBuckets\n - explicitBuckets\n properties:\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n offset:\n type: number\n format: double\n x-dcl-go-name: Offset\n description: Lower bound of the first bucket.\n width:\n type: number\n format: double\n x-dcl-go-name: Width\n description: Must be greater than 0.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the metric. 
This field\n may not be present for older metrics.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A description of this metric, which is used in documentation.\n The maximum length of the description is 8000 characters.\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Optional. If set to True, then this metric is disabled and\n it does not generate any points.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Required. An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters)\n which is used to match log entries. Example: \"resource.type=gae_app AND\n severity>=ERROR\" The maximum length of the filter is 20000 characters.'\n labelExtractors:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: LabelExtractors\n description: Optional. A map from a label key string to an extractor expression\n which is used to extract data from a log entry field and assign as the\n label value. Each label key specified in the LabelDescriptor must have\n an associated extractor expression in this map. The syntax of the extractor\n expression is the same as for the `value_extractor` field. The extracted\n value is converted to the type defined in the label descriptor. If the\n either the extraction or the type conversion fails, the label will have\n a default value. The default value for a string label is an empty string,\n for an integer label its 0, and for a boolean label its `false`. Note\n that there are upper bounds on the maximum number of labels and the number\n of active time series that are allowed in a project.\n metricDescriptor:\n type: object\n x-dcl-go-name: MetricDescriptor\n x-dcl-go-type: LogMetricMetricDescriptor\n description: Optional. The metric descriptor associated with the logs-based\n metric. 
If unspecified, it uses a default metric descriptor with a DELTA\n metric kind, INT64 value type, with no labels and a unit of \"1\". Such\n a metric counts the number of log entries matching the `filter` expression.\n The `name`, `type`, and `description` fields in the `metric_descriptor`\n are output only, and is constructed using the `name` and `description`\n field in the LogMetric. To create a logs-based metric that records a distribution\n of log values, a DELTA metric kind with a DISTRIBUTION value type must\n be used along with a `value_extractor` expression in the LogMetric. Each\n label in the metric descriptor must have a matching label name as the\n key and an extractor expression as the value in the `label_extractors`\n map. The `metric_kind` and `value_type` fields in the `metric_descriptor`\n cannot be updated once initially configured. New labels can be added in\n the `metric_descriptor`, but existing labels cannot be modified except\n for their description.\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n readOnly: true\n description: A detailed description of the metric, which can be used\n in documentation.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in\n user interfaces. Use sentence case without an ending period, for example\n \"Request count\". This field is optional but it is recommended to be\n set for any metrics associated with user-visible concepts, such as\n Quota.\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific\n instance of this metric type. 
For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`,\n so you can look at latencies for successful responses or just for\n responses that failed.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: LogMetricMetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: The label key.\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: LogMetricMetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. Possible\n values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n enum:\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: LogMetricMetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of\n the metric.\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data\n points older than this age are guaranteed to be ingested and available\n to be read, excluding data loss due to errors.\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. 
For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: LogMetricMetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes\n to a value, etc. Some combinations of `metric_kind` and `value_type`\n might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that\n is associated with this metric type can only be associated with one\n of the monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n readOnly: true\n description: 'The metric type, including its DNS name prefix. The type\n is not URL-encoded. All user-defined metric types have the DNS name\n `custom.googleapis.com` or `external.googleapis.com`. Metric types\n should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. 
It is\n only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values.\n Different systems might scale the values to be more easily displayed\n (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value\n of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit`\n is `kBy`, then the value of the metric is always in thousands of bytes,\n no matter how it might be displayed. If you want a custom metric to\n record the exact number of CPU-seconds used by a job, you can create\n an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently\n `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the\n value is written as `12005`. Alternatively, if you want a custom metric\n to record data in a more granular way, you can create a `DOUBLE CUMULATIVE`\n metric whose `unit` is `ks{CPU}`, and then write the value `12.005`\n (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which\n is `12005/1024`). The supported units are a subset of [The Unified\n Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard:\n **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min`\n minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta\n (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9)\n * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z`\n zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi\n (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar**\n The grammar also includes these connectors: * `/` division or ratio\n (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms`\n (although you should almost never have `/s` in a metric `unit`; rates\n should always be computed at query time from the underlying cumulative\n or delta value). * `.` multiplication or composition (as an infix\n operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a\n unit is as follows: Expression = Component { \".\" Component } { \"/\"\n Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used\n alone, then the unit is equivalent to `1`. For examples, `{request}/s\n == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank\n printable ASCII characters not containing `{` or `}`. * `1` represents\n a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic\n units are appropriate. For example, \"new users per day\" can be represented\n as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5\n new users). Alternatively, \"thousands of page views per day\" would\n be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\"). * `%` represents\n dimensionless value of 1/100, and annotates values giving a percentage\n (so the metric values are typically in the range of 0..100, and a\n metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric\n contains a ratio, typically in the range 0..1, that will be multiplied\n by 100 and displayed as a percentage (so a metric value `0.03` means\n \"3 percent\").'\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point\n number, etc. 
Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. The client-assigned metric identifier. Examples:\n `\"error_count\"`, `\"nginx/requests\"`. Metric identifiers are limited to\n 100 characters and can include only the following characters: `A-Z`, `a-z`,\n `0-9`, and the special characters `_-.,+!*'',()%/`. The forward-slash\n character (`/`) denotes a hierarchy of name pieces, and it cannot be the\n first character of the name. The metric identifier in this field must\n not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).\n However, when the metric identifier appears as the `[METRIC_ID]` part\n of a `metric_name` API parameter, then the metric identifier must be URL-encoded.\n Example: `\"projects/my-project/metrics/nginx%2Frequests\"`.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The resource name of the project in which to create the metric.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the metric. This\n field may not be present for older metrics.\n x-kubernetes-immutable: true\n valueExtractor:\n type: string\n x-dcl-go-name: ValueExtractor\n description: 'Optional. A `value_extractor` is required when using a distribution\n logs-based metric to extract the values to record from a log entry. Two\n functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field,\n regex)`. The argument are: 1. field: The name of the log entry field from\n which the value is to be extracted. 2. 
regex: A regular expression using\n the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with\n a single capture group to extract data from the specified log entry field.\n The value of the field is converted to a string before applying the regex.\n It is an error to specify a regex that does not include exactly one capture\n group. The result of the extraction must be convertible to a double type,\n as the distribution always records double values. If either the extraction\n or the conversion to double fails, then those values are not recorded\n in the distribution. Example: `REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(d+).*\")`'\n") -// 22059 bytes -// MD5: c3404550568b8db8f3aa1969ac4367e5 +// 22058 bytes +// MD5: a2e241a0cd3ea262a661d622bdb18e0d diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go index c1d3727ba4..201797c4e6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go @@ -152,11 +152,10 @@ func (c *Client) GetLogView(ctx context.Context, r *LogView) (*LogView, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Location = nr.Location - result.Bucket = nr.Bucket - result.Parent = nr.Parent - result.Name = nr.Name + result.Location = r.Location + result.Bucket = r.Bucket + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.go index a8667a6a40..e7d41493e8 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.go @@ -12638,9 +12638,8 @@ func (c *Client) GetDashboard(ctx context.Context, r *Dashboard) (*Dashboard, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.yaml index 011dc3b677..af85bb3e35 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard.yaml @@ -223,7 +223,7 @@ components: above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard - with the following four thresholds: { value: 90, category: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: @@ -2500,7 +2500,7 @@ components: threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider - a scorecard with the following four thresholds: { value: + a scorecard with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: @@ -4626,7 +4626,7 @@ components: the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following - four thresholds: { value: 90, category: ''DANGER'', trigger: + four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: ''WARNING'', trigger: @@ -6872,7 +6872,7 @@ components: above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard - with the following four thresholds: { value: 90, category: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_beta_yaml_embed.go index 57577bb6b0..f5e515b2cb 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/beta/dashboard.yaml -var YAML_dashboard = []byte("info:\n title: Monitoring/Dashboard\n description: The Monitoring Dashboard resource\n x-dcl-struct-name: Dashboard\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n apply:\n description: The function used to apply information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n delete:\n description: The function used to delete a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n deleteAll:\n description: The function used to delete all Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Dashboard\n 
parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Dashboard:\n title: Dashboard\n x-dcl-id: projects/{{project}}/dashboards/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - displayName\n - project\n properties:\n columnLayout:\n type: object\n x-dcl-go-name: ColumnLayout\n x-dcl-go-type: DashboardColumnLayout\n description: The content is divided into equally spaced columns and the\n widgets are arranged vertically.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - rowLayout\n properties:\n columns:\n type: array\n x-dcl-go-name: Columns\n description: The columns of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumns\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this column. The column weight\n is used to adjust the width of columns on the screen (relative\n to peers). Greater the weight, greater the width of the column\n on the screen. 
If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged vertically in this column.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) As an example, consider a scorecard\n with the following four thresholds: { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. 
The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data 
in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n 
x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. 
The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. 
Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Required. The mutable, human-readable name.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: \\`etag\\` is used for optimistic concurrency control as a way\n to help prevent simultaneous updates of a policy from overwriting each\n other. An \\`etag\\` is returned in the response to \\`GetDashboard\\`, and\n users are expected to put that etag in the request to \\`UpdateDashboard\\`\n to ensure that their change will be applied to the same version of the\n Dashboard configuration. 
The field should not be passed during dashboard\n creation.\n x-kubernetes-immutable: true\n gridLayout:\n type: object\n x-dcl-go-name: GridLayout\n x-dcl-go-type: DashboardGridLayout\n description: Content is arranged with a basic layout that re-flows a simple\n list of informational elements like widgets or tiles.\n x-dcl-conflicts:\n - mosaicLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns into which the view's width is divided.\n If omitted or set to zero, a system default will be used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The informational elements that are arranged into the columns\n row-first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardGridLayoutWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardGridLayoutWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to return.\n See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned. An\n empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect logs\n for. Currently only projects are supported. 
If empty, the\n widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart. The\n value of the chart should always be greater than or\n equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart. The\n value of the chart should always be less than or equal\n to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency in\n the chart implemented by specifying the minimum alignment\n period to use in a time series query. For example, if\n the data is published once every 10 minutes it would\n not make sense to fetch and align data at one minute\n intervals. This field is optional and exists only as\n a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to show\n in this chartView. 
Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state of\n the scorecard given the time series'' current value. For\n an actual value x, the scorecard is in a danger state if\n x is less than or equal to a danger threshold that triggers\n below, or greater than or equal to a danger threshold that\n triggers above. Similarly, if x is above/below a warning\n threshold that triggers above/below, then the scorecard\n is in a warning state - unless x also puts it in a danger\n state. (Danger trumps warning.) As an example, consider\n a scorecard with the following four thresholds: { value:\n 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value:\n 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value:\n 10, category: ''DANGER'', trigger: ''BELOW'', }, { value:\n 20, category: ''WARNING'', trigger: ''BELOW'', } Then:\n values less than or equal to 10 would put the scorecard\n in a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal to 70\n but less than 90 a WARNING state, and values greater than\n or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series data\n from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. 
If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources, and\n projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after `aggregation`\n is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two time\n series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. 
The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. 
If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after the\n ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched time\n series. If non-empty, this unit will override any unit\n that accompanies fetched data. 
The format is the same\n as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardGridLayoutWidgetsText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardGridLayoutWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`, which\n will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time series\n query For example, if the data is published once every\n 10 minutes, the `min_alignment_period` should be at\n least 10 minutes. It would not make sense to fetch\n and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on the\n chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE,\n STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. 
Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the 
[time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across the\n chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. 
Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison chart.\n A comparison chart simultaneously shows values from two\n similar-length time periods (e.g., week-over-week metrics).\n The duration must be positive, and it can only be applied\n to charts with data sets of LINE plot type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n mosaicLayout:\n type: object\n x-dcl-go-name: MosaicLayout\n x-dcl-go-type: DashboardMosaicLayout\n description: The content is arranged as a grid of tiles, with each content\n widget occupying one or more tiles.\n x-dcl-conflicts:\n - gridLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns in the mosaic grid.\n tiles:\n type: array\n x-dcl-go-name: Tiles\n description: The tiles to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTiles\n properties:\n height:\n type: integer\n format: int64\n x-dcl-go-name: Height\n description: The height of the tile, measured in grid squares.\n widget:\n type: object\n x-dcl-go-name: Widget\n x-dcl-go-type: DashboardMosaicLayoutTilesWidget\n description: The informational widget contained in the tile.\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to\n return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field is\n optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a danger\n threshold that triggers above. Similarly, if x is above/below\n a warning threshold that triggers above/below, then\n the scorecard is in a warning state - unless x also\n puts it in a danger state. (Danger trumps warning.) As\n an example, consider a scorecard with the following\n four thresholds: { value: 90, category: ''DANGER'', trigger:\n ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger:\n ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger:\n ''BELOW'', }, { value: 20, category: ''WARNING'', trigger:\n ''BELOW'', } Then: values less than or equal to 10\n would put the scorecard in a DANGER state, values greater\n than 10 but less than or equal to 20 a WARNING state,\n values strictly between 20 and 70 an OK state, values\n greater than or equal to 70 but less than 90 a WARNING\n state, and values greater than or equal to 90 a DANGER\n state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. 
If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. 
Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two\n time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. 
If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. 
The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. 
If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. 
If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time\n series query For example, if the data is published\n once every 10 minutes, the `min_alignment_period`\n should be at least 10 minutes. It would not make\n sense to fetch and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on\n the chart. Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. 
If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. 
Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will\n override any unit that accompanies fetched\n data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n width:\n type: integer\n format: int64\n x-dcl-go-name: Width\n description: The width of the tile, measured in grid squares.\n xPos:\n type: integer\n format: int64\n x-dcl-go-name: XPos\n description: The zero-indexed position of the tile in grid squares\n relative to the left edge of the grid.\n yPos:\n type: integer\n format: int64\n x-dcl-go-name: YPos\n description: The zero-indexed position of the tile in grid squares\n relative to the top edge of the grid.\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. 
The resource name of the dashboard.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project id of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n rowLayout:\n type: object\n x-dcl-go-name: RowLayout\n x-dcl-go-type: DashboardRowLayout\n description: The content is divided into equally spaced rows and the widgets\n are arranged horizontally.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - columnLayout\n properties:\n rows:\n type: array\n x-dcl-go-name: Rows\n description: The rows of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRows\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this row. The row weight is\n used to adjust the height of rows on the screen (relative to\n peers). Greater the weight, greater the height of the row on\n the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged horizontally in this\n row.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. 
See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 
minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) 
As an example, consider a scorecard\n with the following four thresholds: { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. 
Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: 
Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. 
Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n") +var YAML_dashboard = []byte("info:\n title: Monitoring/Dashboard\n description: The Monitoring Dashboard resource\n x-dcl-struct-name: Dashboard\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n apply:\n description: The function used to apply information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n delete:\n description: The function used to delete a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n deleteAll:\n description: The function used to delete all Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Dashboard:\n title: Dashboard\n x-dcl-id: projects/{{project}}/dashboards/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - displayName\n - project\n properties:\n columnLayout:\n type: object\n x-dcl-go-name: ColumnLayout\n x-dcl-go-type: DashboardColumnLayout\n description: The content is divided into equally spaced columns and the\n widgets are arranged vertically.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - rowLayout\n properties:\n columns:\n type: array\n x-dcl-go-name: Columns\n description: The columns of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumns\n properties:\n weight:\n type: integer\n format: 
int64\n x-dcl-go-name: Weight\n description: The relative weight of this column. The column weight\n is used to adjust the width of columns on the screen (relative\n to peers). Greater the weight, greater the width of the column\n on the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged vertically in this column.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) As an example, consider a scorecard\n with the following four thresholds { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. 
The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data 
in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n 
x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. 
The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. 
Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Required. The mutable, human-readable name.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: \\`etag\\` is used for optimistic concurrency control as a way\n to help prevent simultaneous updates of a policy from overwriting each\n other. An \\`etag\\` is returned in the response to \\`GetDashboard\\`, and\n users are expected to put that etag in the request to \\`UpdateDashboard\\`\n to ensure that their change will be applied to the same version of the\n Dashboard configuration. 
The field should not be passed during dashboard\n creation.\n x-kubernetes-immutable: true\n gridLayout:\n type: object\n x-dcl-go-name: GridLayout\n x-dcl-go-type: DashboardGridLayout\n description: Content is arranged with a basic layout that re-flows a simple\n list of informational elements like widgets or tiles.\n x-dcl-conflicts:\n - mosaicLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns into which the view's width is divided.\n If omitted or set to zero, a system default will be used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The informational elements that are arranged into the columns\n row-first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardGridLayoutWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardGridLayoutWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to return.\n See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned. An\n empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect logs\n for. Currently only projects are supported. 
If empty, the\n widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart. The\n value of the chart should always be greater than or\n equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart. The\n value of the chart should always be less than or equal\n to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency in\n the chart implemented by specifying the minimum alignment\n period to use in a time series query. For example, if\n the data is published once every 10 minutes it would\n not make sense to fetch and align data at one minute\n intervals. This field is optional and exists only as\n a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to show\n in this chartView. 
Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state of\n the scorecard given the time series'' current value. For\n an actual value x, the scorecard is in a danger state if\n x is less than or equal to a danger threshold that triggers\n below, or greater than or equal to a danger threshold that\n triggers above. Similarly, if x is above/below a warning\n threshold that triggers above/below, then the scorecard\n is in a warning state - unless x also puts it in a danger\n state. (Danger trumps warning.) As an example, consider\n a scorecard with the following four thresholds { value:\n 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value:\n 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value:\n 10, category: ''DANGER'', trigger: ''BELOW'', }, { value:\n 20, category: ''WARNING'', trigger: ''BELOW'', } Then:\n values less than or equal to 10 would put the scorecard\n in a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal to 70\n but less than 90 a WARNING state, and values greater than\n or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series data\n from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. 
If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources, and\n projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after `aggregation`\n is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two time\n series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. 
The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. 
If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after the\n ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched time\n series. If non-empty, this unit will override any unit\n that accompanies fetched data. 
The format is the same\n as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardGridLayoutWidgetsText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardGridLayoutWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`, which\n will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time series\n query For example, if the data is published once every\n 10 minutes, the `min_alignment_period` should be at\n least 10 minutes. It would not make sense to fetch\n and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on the\n chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE,\n STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. 
Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the 
[time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across the\n chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. 
Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison chart.\n A comparison chart simultaneously shows values from two\n similar-length time periods (e.g., week-over-week metrics).\n The duration must be positive, and it can only be applied\n to charts with data sets of LINE plot type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n mosaicLayout:\n type: object\n x-dcl-go-name: MosaicLayout\n x-dcl-go-type: DashboardMosaicLayout\n description: The content is arranged as a grid of tiles, with each content\n widget occupying one or more tiles.\n x-dcl-conflicts:\n - gridLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns in the mosaic grid.\n tiles:\n type: array\n x-dcl-go-name: Tiles\n description: The tiles to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTiles\n properties:\n height:\n type: integer\n format: int64\n x-dcl-go-name: Height\n description: The height of the tile, measured in grid squares.\n widget:\n type: object\n x-dcl-go-name: Widget\n x-dcl-go-type: DashboardMosaicLayoutTilesWidget\n description: The informational widget contained in the tile.\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to\n return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field is\n optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a danger\n threshold that triggers above. Similarly, if x is above/below\n a warning threshold that triggers above/below, then\n the scorecard is in a warning state - unless x also\n puts it in a danger state. (Danger trumps warning.) As\n an example, consider a scorecard with the following\n four thresholds { value: 90, category: ''DANGER'', trigger:\n ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger:\n ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger:\n ''BELOW'', }, { value: 20, category: ''WARNING'', trigger:\n ''BELOW'', } Then: values less than or equal to 10\n would put the scorecard in a DANGER state, values greater\n than 10 but less than or equal to 20 a WARNING state,\n values strictly between 20 and 70 an OK state, values\n greater than or equal to 70 but less than 90 a WARNING\n state, and values greater than or equal to 90 a DANGER\n state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. 
If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. 
Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two\n time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. 
If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. 
The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. 
If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. 
If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time\n series query For example, if the data is published\n once every 10 minutes, the `min_alignment_period`\n should be at least 10 minutes. It would not make\n sense to fetch and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on\n the chart. Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. 
If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. 
Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will\n override any unit that accompanies fetched\n data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n width:\n type: integer\n format: int64\n x-dcl-go-name: Width\n description: The width of the tile, measured in grid squares.\n xPos:\n type: integer\n format: int64\n x-dcl-go-name: XPos\n description: The zero-indexed position of the tile in grid squares\n relative to the left edge of the grid.\n yPos:\n type: integer\n format: int64\n x-dcl-go-name: YPos\n description: The zero-indexed position of the tile in grid squares\n relative to the top edge of the grid.\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. 
The resource name of the dashboard.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project id of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n rowLayout:\n type: object\n x-dcl-go-name: RowLayout\n x-dcl-go-type: DashboardRowLayout\n description: The content is divided into equally spaced rows and the widgets\n are arranged horizontally.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - columnLayout\n properties:\n rows:\n type: array\n x-dcl-go-name: Rows\n description: The rows of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRows\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this row. The row weight is\n used to adjust the height of rows on the screen (relative to\n peers). Greater the weight, greater the height of the row on\n the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged horizontally in this\n row.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. 
See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 
minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) 
As an example, consider a scorecard\n with the following four thresholds { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. 
Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: 
Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. 
Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n") -// 655465 bytes -// MD5: db4741069b924e50ce03606ded32f291 +// 655461 bytes +// MD5: 502c0463d908e557d254dfe89f64940b diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_schema.go index f56b08140c..49ffe1f536 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/dashboard_schema.go @@ -245,7 +245,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -1732,7 +1732,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -3216,7 +3216,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -4737,7 +4737,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group.go index e99fad078d..e5623ec184 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group.go @@ -146,9 +146,8 @@ func (c *Client) GetGroup(ctx context.Context, r *Group) (*Group, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group_internal.go index 190324ca14..ae22a8e3e1 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/group_internal.go @@ -311,11 +311,8 @@ func (op *createGroupOperation) do(ctx context.Context, r *Group, c *Client) err op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetGroup(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.go index d6da263e51..1a107a5a07 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.go @@ -399,9 +399,8 @@ func (c *Client) GetMetricDescriptor(ctx context.Context, r *MetricDescriptor) ( if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Type = nr.Type + result.Project = r.Project + result.Type = r.Type c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.yaml 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.yaml index f0142ba1ae..313684d684 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor.yaml @@ -265,7 +265,7 @@ components: never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. - The grammar for a unit is as follows: Expression = Component: { "." Component + The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. 
If the annotation is used alone, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_beta_yaml_embed.go index 391234e554..e40fe43e1f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/beta/metric_descriptor.yaml -var YAML_metric_descriptor = []byte("info:\n title: Monitoring/MetricDescriptor\n description: The Monitoring MetricDescriptor resource\n x-dcl-struct-name: MetricDescriptor\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n apply:\n description: The function used to apply information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n delete:\n description: The function used to delete a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n deleteAll:\n description: The function used to delete all MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n MetricDescriptor:\n title: MetricDescriptor\n x-dcl-id: 
projects/{{project}}/metricDescriptors/{{type}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - type\n - metricKind\n - valueType\n - project\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A detailed description of the metric, which can be used in\n documentation.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in user\n interfaces. Use sentence case without an ending period, for example \"Request\n count\". This field is optional but it is recommended to be set for any\n metrics associated with user-visible concepts, such as Quota.\n x-kubernetes-immutable: true\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific instance\n of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`, so\n you can look at latencies for successful responses or just for responses\n that failed.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: MetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: 'The key for this label. The key must meet the following\n criteria: * Does not exceed 100 characters. * Matches the following\n regular expression: `a-zA-Z*` * The first character must be an upper-\n or lower-case letter. 
* The remaining characters must be letters,\n digits, or underscores.'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: MetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of the\n metric.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data points\n older than this age are guaranteed to be ingested and available to\n be read, excluding data loss due to errors.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorMetadataLaunchStageEnum\n description: 'Deprecated. Must use the MetricDescriptor.launch_stage\n instead. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED,\n PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. 
For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n x-kubernetes-immutable: true\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: MetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes to\n a value, etc. Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA,\n CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - METRIC_KIND_UNSPECIFIED\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that is\n associated with this metric type can only be associated with one of the\n monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n description: 'The metric type, including its DNS name prefix. The type is\n not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com`\n or `external.googleapis.com`. Metric types should use a natural hierarchical\n grouping. 
For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. It is only\n applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values. Different\n systems might scale the values to be more easily displayed (so a value\n of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy`\n _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then\n the value of the metric is always in thousands of bytes, no matter how\n it might be displayed. If you want a custom metric to record the exact\n number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE`\n metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`).\n If the job uses 12,005 CPU-seconds, then the value is written as `12005`.\n Alternatively, if you want a custom metric to record data in a more granular\n way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`,\n and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}`\n and write `11.723` (which is `12005/1024`). 
The supported units are a\n subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html)\n standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second\n * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24)\n * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico\n (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21)\n * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi\n (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also\n includes these connectors: * `/` division or ratio (as an infix operator).\n For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost\n never have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value). * `.` multiplication\n or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`.\n The grammar for a unit is as follows: Expression = Component: { \".\" Component\n } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation\n ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used alone,\n then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`,\n `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable\n ASCII characters not containing `{` or `}`. * `1` represents a unitary\n [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic units\n are appropriate. 
For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users).\n Alternatively, \"thousands of page views per day\" would be represented\n as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3`\n would mean \"5300 page views per day\"). * `%` represents dimensionless\n value of 1/100, and annotates values giving a percentage (so the metric\n values are typically in the range of 0..100, and a metric value `3` means\n \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically\n in the range 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point number,\n etc. Some combinations of `metric_kind` and `value_type` might not be\n supported. 
Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n") +var YAML_metric_descriptor = []byte("info:\n title: Monitoring/MetricDescriptor\n description: The Monitoring MetricDescriptor resource\n x-dcl-struct-name: MetricDescriptor\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n apply:\n description: The function used to apply information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n delete:\n description: The function used to delete a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n deleteAll:\n description: The function used to delete all MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n MetricDescriptor:\n title: MetricDescriptor\n x-dcl-id: projects/{{project}}/metricDescriptors/{{type}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - type\n - metricKind\n - valueType\n - project\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A detailed description of the metric, which can be used in\n documentation.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in user\n interfaces. 
Use sentence case without an ending period, for example \"Request\n count\". This field is optional but it is recommended to be set for any\n metrics associated with user-visible concepts, such as Quota.\n x-kubernetes-immutable: true\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific instance\n of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`, so\n you can look at latencies for successful responses or just for responses\n that failed.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: MetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: 'The key for this label. The key must meet the following\n criteria: * Does not exceed 100 characters. * Matches the following\n regular expression: `a-zA-Z*` * The first character must be an upper-\n or lower-case letter. * The remaining characters must be letters,\n digits, or underscores.'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. 
Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: MetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of the\n metric.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data points\n older than this age are guaranteed to be ingested and available to\n be read, excluding data loss due to errors.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorMetadataLaunchStageEnum\n description: 'Deprecated. Must use the MetricDescriptor.launch_stage\n instead. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED,\n PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n x-kubernetes-immutable: true\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: MetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes to\n a value, etc. Some combinations of `metric_kind` and `value_type` might\n not be supported. 
Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA,\n CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - METRIC_KIND_UNSPECIFIED\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that is\n associated with this metric type can only be associated with one of the\n monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n description: 'The metric type, including its DNS name prefix. The type is\n not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com`\n or `external.googleapis.com`. Metric types should use a natural hierarchical\n grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. It is only\n applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values. 
Different\n systems might scale the values to be more easily displayed (so a value\n of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy`\n _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then\n the value of the metric is always in thousands of bytes, no matter how\n it might be displayed. If you want a custom metric to record the exact\n number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE`\n metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`).\n If the job uses 12,005 CPU-seconds, then the value is written as `12005`.\n Alternatively, if you want a custom metric to record data in a more granular\n way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`,\n and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}`\n and write `11.723` (which is `12005/1024`). The supported units are a\n subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html)\n standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second\n * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24)\n * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico\n (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21)\n * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi\n (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also\n includes these connectors: * `/` division or ratio (as an infix operator).\n For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost\n never have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value). * `.` multiplication\n or composition (as an infix operator). 
For examples, `GBy.d` or `k{watt}.h`.\n The grammar for a unit is as follows: Expression = Component { \".\" Component\n } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation\n ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used alone,\n then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`,\n `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable\n ASCII characters not containing `{` or `}`. * `1` represents a unitary\n [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic units\n are appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users).\n Alternatively, \"thousands of page views per day\" would be represented\n as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3`\n would mean \"5300 page views per day\"). * `%` represents dimensionless\n value of 1/100, and annotates values giving a percentage (so the metric\n values are typically in the range of 0..100, and a metric value `3` means\n \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically\n in the range 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point number,\n etc. Some combinations of `metric_kind` and `value_type` might not be\n supported. 
Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n") -// 13522 bytes -// MD5: 79db6507b19e533c588da0a300225562 +// 13521 bytes +// MD5: 9b08860b811ef86fd547161cfb28665e diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_schema.go index 7a40010f66..587f4e83d6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metric_descriptor_schema.go @@ -259,7 +259,7 @@ func DCLMetricDescriptorSchema() *dcl.Schema { "unit": &dcl.Property{ Type: "string", GoName: "Unit", - Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. 
Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. 
It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", + Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). 
The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). 
Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", Immutable: true, }, "valueType": &dcl.Property{ diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope.go index 21fda10c32..858b94570f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope.go @@ -142,8 +142,7 @@ func (c *Client) GetMetricsScope(ctx context.Context, r *MetricsScope) (*Metrics if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope_internal.go index 2cd21548cb..5bc815c202 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope_internal.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/metrics_scope_internal.go @@ -442,7 +442,7 @@ func expandMetricsScope(c *Client, f *MetricsScope) (map[string]interface{}, err m := make(map[string]interface{}) res := f _ = res - if v, err := dcl.DeriveField("locations/global/metricsScope/%s", f.Name, dcl.SelfLinkToName(f.Name)); err != nil { + if v, err := dcl.ExpandProjectIDsToNumbers(c.Config, f.Name); err != nil { return nil, fmt.Errorf("error expanding Name into name: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { m["name"] = v @@ -463,7 +463,7 @@ func flattenMetricsScope(c *Client, i interface{}, res *MetricsScope) *MetricsSc } resultRes := &MetricsScope{} - resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Name = dcl.FlattenProjectNumbersToIDs(c.Config, dcl.FlattenString(m["name"])) resultRes.CreateTime = dcl.FlattenString(m["createTime"]) resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) resultRes.MonitoredProjects = flattenMetricsScopeMonitoredProjectsSlice(c, m["monitoredProjects"], res) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel.go index d60d6a810d..fa27bfaf12 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel.go @@ -179,9 +179,8 @@ func (c *Client) GetNotificationChannel(ctx context.Context, r *NotificationChan if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name if dcl.IsZeroValue(result.Enabled) { result.Enabled = 
dcl.Bool(true) } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel_internal.go index 6ab3e884c3..34dfa00048 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/notification_channel_internal.go @@ -313,11 +313,8 @@ func (op *createNotificationChannelOperation) do(ctx context.Context, r *Notific op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetNotificationChannel(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service.go index 57fa66e7b1..d3b59c970c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service.go @@ -237,9 +237,8 @@ func (c *Client) GetService(ctx context.Context, r *Service) (*Service, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name 
if dcl.IsZeroValue(result.Custom) { result.Custom = &ServiceCustom{} } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service_level_objective.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service_level_objective.go index 2d85f7cbd5..58b3977fa6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service_level_objective.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/service_level_objective.go @@ -1548,10 +1548,9 @@ func (c *Client) GetServiceLevelObjective(ctx context.Context, r *ServiceLevelOb if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Service = nr.Service - result.Name = nr.Name + result.Project = r.Project + result.Service = r.Service + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config.go index 051bbdb9db..90e4b23cd1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config.go @@ -581,9 +581,8 @@ func (c *Client) GetUptimeCheckConfig(ctx context.Context, r *UptimeCheckConfig) if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = 
r.Name if dcl.IsZeroValue(result.Period) { result.Period = dcl.String("60s") } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config_internal.go index 2ee6187dc8..d6feaad32d 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/beta/uptime_check_config_internal.go @@ -395,11 +395,8 @@ func (op *createUptimeCheckConfigOperation) do(ctx context.Context, r *UptimeChe op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetUptimeCheckConfig(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go index 58b928e1aa..3a8fb16939 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go @@ -12638,9 +12638,8 @@ func (c *Client) GetDashboard(ctx context.Context, r *Dashboard) (*Dashboard, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + 
result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml index 011dc3b677..af85bb3e35 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml @@ -223,7 +223,7 @@ components: above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: 90, category: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: @@ -2500,7 +2500,7 @@ components: threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider - a scorecard with the following four thresholds: { value: + a scorecard with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: @@ -4626,7 +4626,7 @@ components: the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following - four thresholds: { value: 90, category: ''DANGER'', trigger: + four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: ''WARNING'', trigger: @@ -6872,7 +6872,7 @@ components: above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: 90, category: + with the following four thresholds { value: 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go index de0899b147..14e6f9bf1c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go @@ -245,7 +245,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. 
(Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -1732,7 +1732,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -3216,7 +3216,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -4737,7 +4737,7 @@ func DCLDashboardSchema() *dcl.Schema { "thresholds": &dcl.Property{ Type: "array", GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", + Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", SendEmpty: true, ListType: "list", Items: &dcl.Property{ diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go index 5c8535173d..6fd5ea6bef 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go @@ -17,7 +17,7 @@ package monitoring // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/dashboard.yaml -var YAML_dashboard = []byte("info:\n title: Monitoring/Dashboard\n description: The Monitoring Dashboard resource\n x-dcl-struct-name: Dashboard\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n apply:\n description: The function used to apply information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n delete:\n description: The function used to delete a Dashboard\n parameters:\n - name: Dashboard\n 
required: true\n description: A full instance of a Dashboard\n deleteAll:\n description: The function used to delete all Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Dashboard:\n title: Dashboard\n x-dcl-id: projects/{{project}}/dashboards/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - displayName\n - project\n properties:\n columnLayout:\n type: object\n x-dcl-go-name: ColumnLayout\n x-dcl-go-type: DashboardColumnLayout\n description: The content is divided into equally spaced columns and the\n widgets are arranged vertically.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - rowLayout\n properties:\n columns:\n type: array\n x-dcl-go-name: Columns\n description: The columns of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumns\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this column. The column weight\n is used to adjust the width of columns on the screen (relative\n to peers). Greater the weight, greater the width of the column\n on the screen. 
If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged vertically in this column.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) As an example, consider a scorecard\n with the following four thresholds: { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. 
The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data 
in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n 
x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. 
The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. 
Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Required. The mutable, human-readable name.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: \\`etag\\` is used for optimistic concurrency control as a way\n to help prevent simultaneous updates of a policy from overwriting each\n other. An \\`etag\\` is returned in the response to \\`GetDashboard\\`, and\n users are expected to put that etag in the request to \\`UpdateDashboard\\`\n to ensure that their change will be applied to the same version of the\n Dashboard configuration. 
The field should not be passed during dashboard\n creation.\n x-kubernetes-immutable: true\n gridLayout:\n type: object\n x-dcl-go-name: GridLayout\n x-dcl-go-type: DashboardGridLayout\n description: Content is arranged with a basic layout that re-flows a simple\n list of informational elements like widgets or tiles.\n x-dcl-conflicts:\n - mosaicLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns into which the view's width is divided.\n If omitted or set to zero, a system default will be used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The informational elements that are arranged into the columns\n row-first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardGridLayoutWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardGridLayoutWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to return.\n See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned. An\n empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect logs\n for. Currently only projects are supported. 
If empty, the\n widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart. The\n value of the chart should always be greater than or\n equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart. The\n value of the chart should always be less than or equal\n to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency in\n the chart implemented by specifying the minimum alignment\n period to use in a time series query. For example, if\n the data is published once every 10 minutes it would\n not make sense to fetch and align data at one minute\n intervals. This field is optional and exists only as\n a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to show\n in this chartView. 
Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state of\n the scorecard given the time series'' current value. For\n an actual value x, the scorecard is in a danger state if\n x is less than or equal to a danger threshold that triggers\n below, or greater than or equal to a danger threshold that\n triggers above. Similarly, if x is above/below a warning\n threshold that triggers above/below, then the scorecard\n is in a warning state - unless x also puts it in a danger\n state. (Danger trumps warning.) As an example, consider\n a scorecard with the following four thresholds: { value:\n 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value:\n 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value:\n 10, category: ''DANGER'', trigger: ''BELOW'', }, { value:\n 20, category: ''WARNING'', trigger: ''BELOW'', } Then:\n values less than or equal to 10 would put the scorecard\n in a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal to 70\n but less than 90 a WARNING state, and values greater than\n or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series data\n from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. 
If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources, and\n projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after `aggregation`\n is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two time\n series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. 
The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. 
If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after the\n ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched time\n series. If non-empty, this unit will override any unit\n that accompanies fetched data. 
The format is the same\n as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardGridLayoutWidgetsText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardGridLayoutWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`, which\n will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time series\n query For example, if the data is published once every\n 10 minutes, the `min_alignment_period` should be at\n least 10 minutes. It would not make sense to fetch\n and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on the\n chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE,\n STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. 
Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the 
[time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across the\n chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. 
Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison chart.\n A comparison chart simultaneously shows values from two\n similar-length time periods (e.g., week-over-week metrics).\n The duration must be positive, and it can only be applied\n to charts with data sets of LINE plot type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n mosaicLayout:\n type: object\n x-dcl-go-name: MosaicLayout\n x-dcl-go-type: DashboardMosaicLayout\n description: The content is arranged as a grid of tiles, with each content\n widget occupying one or more tiles.\n x-dcl-conflicts:\n - gridLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns in the mosaic grid.\n tiles:\n type: array\n x-dcl-go-name: Tiles\n description: The tiles to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTiles\n properties:\n height:\n type: integer\n format: int64\n x-dcl-go-name: Height\n description: The height of the tile, measured in grid squares.\n widget:\n type: object\n x-dcl-go-name: Widget\n x-dcl-go-type: DashboardMosaicLayoutTilesWidget\n description: The informational widget contained in the tile.\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to\n return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field is\n optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a danger\n threshold that triggers above. Similarly, if x is above/below\n a warning threshold that triggers above/below, then\n the scorecard is in a warning state - unless x also\n puts it in a danger state. (Danger trumps warning.) As\n an example, consider a scorecard with the following\n four thresholds: { value: 90, category: ''DANGER'', trigger:\n ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger:\n ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger:\n ''BELOW'', }, { value: 20, category: ''WARNING'', trigger:\n ''BELOW'', } Then: values less than or equal to 10\n would put the scorecard in a DANGER state, values greater\n than 10 but less than or equal to 20 a WARNING state,\n values strictly between 20 and 70 an OK state, values\n greater than or equal to 70 but less than 90 a WARNING\n state, and values greater than or equal to 90 a DANGER\n state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. 
If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. 
Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two\n time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. 
If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. 
The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. 
If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. 
If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time\n series query For example, if the data is published\n once every 10 minutes, the `min_alignment_period`\n should be at least 10 minutes. It would not make\n sense to fetch and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on\n the chart. Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. 
If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. 
Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will\n override any unit that accompanies fetched\n data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n width:\n type: integer\n format: int64\n x-dcl-go-name: Width\n description: The width of the tile, measured in grid squares.\n xPos:\n type: integer\n format: int64\n x-dcl-go-name: XPos\n description: The zero-indexed position of the tile in grid squares\n relative to the left edge of the grid.\n yPos:\n type: integer\n format: int64\n x-dcl-go-name: YPos\n description: The zero-indexed position of the tile in grid squares\n relative to the top edge of the grid.\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. 
The resource name of the dashboard.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project id of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n rowLayout:\n type: object\n x-dcl-go-name: RowLayout\n x-dcl-go-type: DashboardRowLayout\n description: The content is divided into equally spaced rows and the widgets\n are arranged horizontally.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - columnLayout\n properties:\n rows:\n type: array\n x-dcl-go-name: Rows\n description: The rows of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRows\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this row. The row weight is\n used to adjust the height of rows on the screen (relative to\n peers). Greater the weight, greater the height of the row on\n the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged horizontally in this\n row.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. 
See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 
minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) 
As an example, consider a scorecard\n with the following four thresholds: { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. 
Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: 
Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. 
Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n") +var YAML_dashboard = []byte("info:\n title: Monitoring/Dashboard\n description: The Monitoring Dashboard resource\n x-dcl-struct-name: Dashboard\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n apply:\n description: The function used to apply information about a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n delete:\n description: The function used to delete a Dashboard\n parameters:\n - name: Dashboard\n required: true\n description: A full instance of a Dashboard\n deleteAll:\n description: The function used to delete all Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Dashboard:\n title: Dashboard\n x-dcl-id: projects/{{project}}/dashboards/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - displayName\n - project\n properties:\n columnLayout:\n type: object\n x-dcl-go-name: ColumnLayout\n x-dcl-go-type: DashboardColumnLayout\n description: The content is divided into equally spaced columns and the\n widgets are arranged vertically.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - rowLayout\n properties:\n columns:\n type: array\n x-dcl-go-name: Columns\n description: The columns of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumns\n properties:\n weight:\n type: integer\n format: 
int64\n x-dcl-go-name: Weight\n description: The relative weight of this column. The column weight\n is used to adjust the width of columns on the screen (relative\n to peers). Greater the weight, greater the width of the column\n on the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged vertically in this column.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) As an example, consider a scorecard\n with the following four thresholds { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. 
The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data 
in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n 
x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. 
The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. 
Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Required. The mutable, human-readable name.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: \\`etag\\` is used for optimistic concurrency control as a way\n to help prevent simultaneous updates of a policy from overwriting each\n other. An \\`etag\\` is returned in the response to \\`GetDashboard\\`, and\n users are expected to put that etag in the request to \\`UpdateDashboard\\`\n to ensure that their change will be applied to the same version of the\n Dashboard configuration. 
The field should not be passed during dashboard\n creation.\n x-kubernetes-immutable: true\n gridLayout:\n type: object\n x-dcl-go-name: GridLayout\n x-dcl-go-type: DashboardGridLayout\n description: Content is arranged with a basic layout that re-flows a simple\n list of informational elements like widgets or tiles.\n x-dcl-conflicts:\n - mosaicLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns into which the view's width is divided.\n If omitted or set to zero, a system default will be used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The informational elements that are arranged into the columns\n row-first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardGridLayoutWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardGridLayoutWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to return.\n See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned. An\n empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect logs\n for. Currently only projects are supported. 
If empty, the\n widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart. The\n value of the chart should always be greater than or\n equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart. The\n value of the chart should always be less than or equal\n to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency in\n the chart implemented by specifying the minimum alignment\n period to use in a time series query. For example, if\n the data is published once every 10 minutes it would\n not make sense to fetch and align data at one minute\n intervals. This field is optional and exists only as\n a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to show\n in this chartView. 
Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state of\n the scorecard given the time series'' current value. For\n an actual value x, the scorecard is in a danger state if\n x is less than or equal to a danger threshold that triggers\n below, or greater than or equal to a danger threshold that\n triggers above. Similarly, if x is above/below a warning\n threshold that triggers above/below, then the scorecard\n is in a warning state - unless x also puts it in a danger\n state. (Danger trumps warning.) As an example, consider\n a scorecard with the following four thresholds { value:\n 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value:\n 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value:\n 10, category: ''DANGER'', trigger: ''BELOW'', }, { value:\n 20, category: ''WARNING'', trigger: ''BELOW'', } Then:\n values less than or equal to 10 would put the scorecard\n in a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal to 70\n but less than 90 a WARNING state, and values greater than\n or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series data\n from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. 
If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources, and\n projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after `aggregation`\n is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two time\n series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. 
The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. 
If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after the\n ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched time\n series. If non-empty, this unit will override any unit\n that accompanies fetched data. 
The format is the same\n as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardGridLayoutWidgetsText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardGridLayoutWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`, which\n will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time series\n query For example, if the data is published once every\n 10 minutes, the `min_alignment_period` should be at\n least 10 minutes. It would not make sense to fetch\n and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on the\n chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE,\n STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. 
Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the 
[time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across the\n chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. 
Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison chart.\n A comparison chart simultaneously shows values from two\n similar-length time periods (e.g., week-over-week metrics).\n The duration must be positive, and it can only be applied\n to charts with data sets of LINE plot type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n mosaicLayout:\n type: object\n x-dcl-go-name: MosaicLayout\n x-dcl-go-type: DashboardMosaicLayout\n description: The content is arranged as a grid of tiles, with each content\n widget occupying one or more tiles.\n x-dcl-conflicts:\n - gridLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns in the mosaic grid.\n tiles:\n type: array\n x-dcl-go-name: Tiles\n description: The tiles to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTiles\n properties:\n height:\n type: integer\n format: int64\n x-dcl-go-name: Height\n description: The height of the tile, measured in grid squares.\n widget:\n type: object\n x-dcl-go-name: Widget\n x-dcl-go-type: DashboardMosaicLayoutTilesWidget\n description: The informational widget contained in the tile.\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to\n return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field is\n optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a danger\n threshold that triggers above. Similarly, if x is above/below\n a warning threshold that triggers above/below, then\n the scorecard is in a warning state - unless x also\n puts it in a danger state. (Danger trumps warning.) As\n an example, consider a scorecard with the following\n four thresholds { value: 90, category: ''DANGER'', trigger:\n ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger:\n ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger:\n ''BELOW'', }, { value: 20, category: ''WARNING'', trigger:\n ''BELOW'', } Then: values less than or equal to 10\n would put the scorecard in a DANGER state, values greater\n than 10 but less than or equal to 20 a WARNING state,\n values strictly between 20 and 70 an OK state, values\n greater than or equal to 70 but less than 90 a WARNING\n state, and values greater than or equal to 90 a DANGER\n state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. 
If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. 
Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two\n time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. 
If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. 
The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. 
If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. 
If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time\n series query For example, if the data is published\n once every 10 minutes, the `min_alignment_period`\n should be at least 10 minutes. It would not make\n sense to fetch and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on\n the chart. Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. 
If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. 
Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will\n override any unit that accompanies fetched\n data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n width:\n type: integer\n format: int64\n x-dcl-go-name: Width\n description: The width of the tile, measured in grid squares.\n xPos:\n type: integer\n format: int64\n x-dcl-go-name: XPos\n description: The zero-indexed position of the tile in grid squares\n relative to the left edge of the grid.\n yPos:\n type: integer\n format: int64\n x-dcl-go-name: YPos\n description: The zero-indexed position of the tile in grid squares\n relative to the top edge of the grid.\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. 
The resource name of the dashboard.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project id of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n rowLayout:\n type: object\n x-dcl-go-name: RowLayout\n x-dcl-go-type: DashboardRowLayout\n description: The content is divided into equally spaced rows and the widgets\n are arranged horizontally.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - columnLayout\n properties:\n rows:\n type: array\n x-dcl-go-name: Rows\n description: The rows of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRows\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this row. The row weight is\n used to adjust the height of rows on the screen (relative to\n peers). Greater the weight, greater the height of the row on\n the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged horizontally in this\n row.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. 
See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 
minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) 
As an example, consider a scorecard\n with the following four thresholds { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. 
Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: 
Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. 
Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n") -// 655465 bytes -// MD5: db4741069b924e50ce03606ded32f291 +// 655461 bytes +// MD5: 502c0463d908e557d254dfe89f64940b diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go index 9d0bd57b33..a6f656c0e2 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go @@ -146,9 +146,8 @@ func (c *Client) GetGroup(ctx context.Context, r *Group) (*Group, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go index 183628e59d..31addc4c0b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go @@ -311,11 +311,8 @@ func (op *createGroupOperation) do(ctx context.Context, r *Group, c *Client) err op.response = o // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetGroup(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go index 9f04156b7d..80192952d4 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go @@ -399,9 +399,8 @@ func (c *Client) GetMetricDescriptor(ctx context.Context, r *MetricDescriptor) ( if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Type = nr.Type + result.Project = r.Project + result.Type = r.Type c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml index f0142ba1ae..313684d684 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml @@ -265,7 +265,7 @@ components: never have `/s` in a metric `unit`; rates should 
always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. - The grammar for a unit is as follows: Expression = Component: { "." Component + The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go index 359159f218..3280e4f1b7 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go @@ -259,7 +259,7 @@ func DCLMetricDescriptorSchema() *dcl.Schema { "unit": &dcl.Property{ Type: "string", GoName: "Unit", - Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). 
If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. 
* `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", + Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). 
The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). 
Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", Immutable: true, }, "valueType": &dcl.Property{ diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go index fbd00cb263..de00bc9cff 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go @@ -17,7 +17,7 @@ package monitoring // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/metric_descriptor.yaml -var YAML_metric_descriptor = []byte("info:\n title: Monitoring/MetricDescriptor\n description: The Monitoring MetricDescriptor resource\n x-dcl-struct-name: MetricDescriptor\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n apply:\n description: The function used to apply information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n delete:\n description: The function used to 
delete a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n deleteAll:\n description: The function used to delete all MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n MetricDescriptor:\n title: MetricDescriptor\n x-dcl-id: projects/{{project}}/metricDescriptors/{{type}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - type\n - metricKind\n - valueType\n - project\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A detailed description of the metric, which can be used in\n documentation.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in user\n interfaces. Use sentence case without an ending period, for example \"Request\n count\". This field is optional but it is recommended to be set for any\n metrics associated with user-visible concepts, such as Quota.\n x-kubernetes-immutable: true\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific instance\n of this metric type. 
For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`, so\n you can look at latencies for successful responses or just for responses\n that failed.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: MetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: 'The key for this label. The key must meet the following\n criteria: * Does not exceed 100 characters. * Matches the following\n regular expression: `a-zA-Z*` * The first character must be an upper-\n or lower-case letter. * The remaining characters must be letters,\n digits, or underscores.'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: MetricDescriptorMetadata\n description: Optional. 
Metadata which can be used to guide usage of the\n metric.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data points\n older than this age are guaranteed to be ingested and available to\n be read, excluding data loss due to errors.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorMetadataLaunchStageEnum\n description: 'Deprecated. Must use the MetricDescriptor.launch_stage\n instead. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED,\n PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n x-kubernetes-immutable: true\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: MetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes to\n a value, etc. Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA,\n CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - METRIC_KIND_UNSPECIFIED\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. 
If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that is\n associated with this metric type can only be associated with one of the\n monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n description: 'The metric type, including its DNS name prefix. The type is\n not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com`\n or `external.googleapis.com`. Metric types should use a natural hierarchical\n grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. It is only\n applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values. Different\n systems might scale the values to be more easily displayed (so a value\n of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy`\n _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then\n the value of the metric is always in thousands of bytes, no matter how\n it might be displayed. 
If you want a custom metric to record the exact\n number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE`\n metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`).\n If the job uses 12,005 CPU-seconds, then the value is written as `12005`.\n Alternatively, if you want a custom metric to record data in a more granular\n way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`,\n and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}`\n and write `11.723` (which is `12005/1024`). The supported units are a\n subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html)\n standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second\n * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24)\n * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico\n (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21)\n * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi\n (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also\n includes these connectors: * `/` division or ratio (as an infix operator).\n For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost\n never have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value). * `.` multiplication\n or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`.\n The grammar for a unit is as follows: Expression = Component: { \".\" Component\n } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation\n ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. 
If the annotation is used alone,\n then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`,\n `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable\n ASCII characters not containing `{` or `}`. * `1` represents a unitary\n [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic units\n are appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users).\n Alternatively, \"thousands of page views per day\" would be represented\n as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3`\n would mean \"5300 page views per day\"). * `%` represents dimensionless\n value of 1/100, and annotates values giving a percentage (so the metric\n values are typically in the range of 0..100, and a metric value `3` means\n \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically\n in the range 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point number,\n etc. Some combinations of `metric_kind` and `value_type` might not be\n supported. 
Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n") +var YAML_metric_descriptor = []byte("info:\n title: Monitoring/MetricDescriptor\n description: The Monitoring MetricDescriptor resource\n x-dcl-struct-name: MetricDescriptor\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n apply:\n description: The function used to apply information about a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n delete:\n description: The function used to delete a MetricDescriptor\n parameters:\n - name: MetricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n deleteAll:\n description: The function used to delete all MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n MetricDescriptor:\n title: MetricDescriptor\n x-dcl-id: projects/{{project}}/metricDescriptors/{{type}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - type\n - metricKind\n - valueType\n - project\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A detailed description of the metric, which can be used in\n documentation.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in user\n interfaces. 
Use sentence case without an ending period, for example \"Request\n count\". This field is optional but it is recommended to be set for any\n metrics associated with user-visible concepts, such as Quota.\n x-kubernetes-immutable: true\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific instance\n of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`, so\n you can look at latencies for successful responses or just for responses\n that failed.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: MetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: 'The key for this label. The key must meet the following\n criteria: * Does not exceed 100 characters. * Matches the following\n regular expression: `a-zA-Z*` * The first character must be an upper-\n or lower-case letter. * The remaining characters must be letters,\n digits, or underscores.'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. 
Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: MetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of the\n metric.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data points\n older than this age are guaranteed to be ingested and available to\n be read, excluding data loss due to errors.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorMetadataLaunchStageEnum\n description: 'Deprecated. Must use the MetricDescriptor.launch_stage\n instead. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED,\n PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n x-kubernetes-immutable: true\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: MetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes to\n a value, etc. Some combinations of `metric_kind` and `value_type` might\n not be supported. 
Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA,\n CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - METRIC_KIND_UNSPECIFIED\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that is\n associated with this metric type can only be associated with one of the\n monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n description: 'The metric type, including its DNS name prefix. The type is\n not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com`\n or `external.googleapis.com`. Metric types should use a natural hierarchical\n grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. It is only\n applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values. 
Different\n systems might scale the values to be more easily displayed (so a value\n of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy`\n _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then\n the value of the metric is always in thousands of bytes, no matter how\n it might be displayed. If you want a custom metric to record the exact\n number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE`\n metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`).\n If the job uses 12,005 CPU-seconds, then the value is written as `12005`.\n Alternatively, if you want a custom metric to record data in a more granular\n way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`,\n and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}`\n and write `11.723` (which is `12005/1024`). The supported units are a\n subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html)\n standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second\n * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24)\n * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico\n (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21)\n * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi\n (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also\n includes these connectors: * `/` division or ratio (as an infix operator).\n For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost\n never have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value). * `.` multiplication\n or composition (as an infix operator). 
For examples, `GBy.d` or `k{watt}.h`.\n The grammar for a unit is as follows: Expression = Component { \".\" Component\n } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation\n ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used alone,\n then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`,\n `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable\n ASCII characters not containing `{` or `}`. * `1` represents a unitary\n [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic units\n are appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users).\n Alternatively, \"thousands of page views per day\" would be represented\n as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3`\n would mean \"5300 page views per day\"). * `%` represents dimensionless\n value of 1/100, and annotates values giving a percentage (so the metric\n values are typically in the range of 0..100, and a metric value `3` means\n \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically\n in the range 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point number,\n etc. Some combinations of `metric_kind` and `value_type` might not be\n supported. 
Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n") -// 13522 bytes -// MD5: 79db6507b19e533c588da0a300225562 +// 13521 bytes +// MD5: 9b08860b811ef86fd547161cfb28665e diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go index d469815101..5e953429ab 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go @@ -142,8 +142,7 @@ func (c *Client) GetMetricsScope(ctx context.Context, r *MetricsScope) (*Metrics if err != nil { return nil, err } - nr := r.urlNormalized() - result.Name = nr.Name + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go index 8231860968..05be6e7a33 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go @@ -442,7 +442,7 @@ func expandMetricsScope(c *Client, f *MetricsScope) (map[string]interface{}, err m := make(map[string]interface{}) res := f _ = res - if v, err := dcl.DeriveField("locations/global/metricsScope/%s", f.Name, dcl.SelfLinkToName(f.Name)); err != nil { + if v, err := 
dcl.ExpandProjectIDsToNumbers(c.Config, f.Name); err != nil { return nil, fmt.Errorf("error expanding Name into name: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { m["name"] = v @@ -463,7 +463,7 @@ func flattenMetricsScope(c *Client, i interface{}, res *MetricsScope) *MetricsSc } resultRes := &MetricsScope{} - resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Name = dcl.FlattenProjectNumbersToIDs(c.Config, dcl.FlattenString(m["name"])) resultRes.CreateTime = dcl.FlattenString(m["createTime"]) resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) resultRes.MonitoredProjects = flattenMetricsScopeMonitoredProjectsSlice(c, m["monitoredProjects"], res) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go index 22825f7bae..88bf0d5b2a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go @@ -179,9 +179,8 @@ func (c *Client) GetNotificationChannel(ctx context.Context, r *NotificationChan if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name if dcl.IsZeroValue(result.Enabled) { result.Enabled = dcl.Bool(true) } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go index e35d79ac3a..4e32a3cd09 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go @@ -313,11 +313,8 @@ func (op *createNotificationChannelOperation) do(ctx context.Context, r *Notific op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetNotificationChannel(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go index c6cf87b9b2..8243887d87 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go @@ -237,9 +237,8 @@ func (c *Client) GetService(ctx context.Context, r *Service) (*Service, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name if dcl.IsZeroValue(result.Custom) { result.Custom = &ServiceCustom{} } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go index 758c82a6bc..19d859a621 100644 
--- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go @@ -1548,10 +1548,9 @@ func (c *Client) GetServiceLevelObjective(ctx context.Context, r *ServiceLevelOb if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Service = nr.Service - result.Name = nr.Name + result.Project = r.Project + result.Service = r.Service + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go index 197994bdff..ffb29e8c6c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go @@ -581,9 +581,8 @@ func (c *Client) GetUptimeCheckConfig(ctx context.Context, r *UptimeCheckConfig) if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name if dcl.IsZeroValue(result.Period) { result.Period = dcl.String("60s") } diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go index b362292d2c..6b5298a380 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go @@ -395,11 +395,8 @@ func (op *createUptimeCheckConfigOperation) do(ctx context.Context, r *UptimeChe op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetUptimeCheckConfig(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/hub.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/hub.go index c0bbdbb3f2..fabd61b401 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/hub.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/hub.go @@ -227,9 +227,8 @@ func (c *Client) GetHub(ctx context.Context, r *Hub) (*Hub, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/spoke.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/spoke.go index c4b71e51b7..46e4ed815a 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/spoke.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/beta/spoke.go @@ -386,10 +386,9 @@ func (c *Client) GetSpoke(ctx context.Context, r *Spoke) (*Spoke, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go index 046b8ec0fd..204ad982b2 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/hub.go @@ -227,9 +227,8 @@ func (c *Client) GetHub(ctx context.Context, r *Hub) (*Hub, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go index 268b583f54..6a4a3751d3 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity/spoke.go @@ -386,10 +386,9 @@ func (c *Client) GetSpoke(ctx context.Context, r *Spoke) (*Spoke, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/authorization_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/authorization_policy.go index c846f5c19b..d4c9b7508f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/authorization_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/authorization_policy.go @@ -385,10 +385,9 @@ func (c *Client) GetAuthorizationPolicy(ctx context.Context, r *AuthorizationPol if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff 
--git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/client_tls_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/client_tls_policy.go index 6a745cb48a..4a598b30b6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/client_tls_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/client_tls_policy.go @@ -440,10 +440,9 @@ func (c *Client) GetClientTlsPolicy(ctx context.Context, r *ClientTlsPolicy) (*C if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/server_tls_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/server_tls_policy.go index a97ee7188d..6af5378a46 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/server_tls_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networksecurity/beta/server_tls_policy.go @@ -486,10 +486,9 @@ func (c *Client) GetServerTlsPolicy(ctx context.Context, r *ServerTlsPolicy) (*S if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = 
r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/endpoint_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/endpoint_policy.go index 8f2ee15b65..17ef81057b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/endpoint_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/endpoint_policy.go @@ -407,10 +407,9 @@ func (c *Client) GetEndpointPolicy(ctx context.Context, r *EndpointPolicy) (*End if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/gateway.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/gateway.go index cf006bbee9..4f338713e6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/gateway.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/gateway.go @@ -191,10 +191,9 @@ func (c *Client) GetGateway(ctx context.Context, r *Gateway) (*Gateway, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project 
- result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/grpc_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/grpc_route.go index 8a58dc023e..ea96d9302f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/grpc_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/grpc_route.go @@ -723,10 +723,9 @@ func (c *Client) GetGrpcRoute(ctx context.Context, r *GrpcRoute) (*GrpcRoute, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_filter.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_filter.go index 960422136a..c6b4c274f3 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_filter.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_filter.go @@ -155,10 +155,9 @@ func (c *Client) GetHttpFilter(ctx 
context.Context, r *HttpFilter) (*HttpFilter, if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_route.go index c12794d9e4..b8c8ab9f28 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/http_route.go @@ -1175,10 +1175,9 @@ func (c *Client) GetHttpRoute(ctx context.Context, r *HttpRoute) (*HttpRoute, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/mesh.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/mesh.go index 76366871f9..ba430377eb 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/mesh.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/mesh.go @@ -154,10 +154,9 @@ func (c *Client) GetMesh(ctx context.Context, r *Mesh) (*Mesh, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/service_binding.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/service_binding.go index 6c354bd959..2e65b26605 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/service_binding.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/service_binding.go @@ -151,10 +151,9 @@ func (c *Client) GetServiceBinding(ctx context.Context, r *ServiceBinding) (*Ser if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tcp_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tcp_route.go index 794fe6d6e1..7d2b5f5be0 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tcp_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tcp_route.go @@ -358,10 +358,9 @@ func (c *Client) GetTcpRoute(ctx context.Context, r *TcpRoute) (*TcpRoute, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tls_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tls_route.go index eb7da75dec..cd063386dd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tls_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/alpha/tls_route.go @@ -351,10 +351,9 @@ func (c *Client) GetTlsRoute(ctx context.Context, r *TlsRoute) (*TlsRoute, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/endpoint_policy.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/endpoint_policy.go index cf3d64880d..1d9ab182ef 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/endpoint_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/endpoint_policy.go @@ -407,10 +407,9 @@ func (c *Client) GetEndpointPolicy(ctx context.Context, r *EndpointPolicy) (*End if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/gateway.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/gateway.go index 2bff487cc3..e06f1dcbf6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/gateway.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/gateway.go @@ -191,10 +191,9 @@ func (c *Client) GetGateway(ctx context.Context, r *Gateway) (*Gateway, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/grpc_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/grpc_route.go index 884d0cfb9a..2c1b420bad 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/grpc_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/grpc_route.go @@ -723,10 +723,9 @@ func (c *Client) GetGrpcRoute(ctx context.Context, r *GrpcRoute) (*GrpcRoute, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/http_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/http_route.go index 6b806a26e2..54abc8f2b1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/http_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/http_route.go @@ -1175,10 +1175,9 @@ func (c *Client) GetHttpRoute(ctx context.Context, r *HttpRoute) (*HttpRoute, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw 
result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/mesh.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/mesh.go index f02483988f..944d080ddb 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/mesh.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/mesh.go @@ -154,10 +154,9 @@ func (c *Client) GetMesh(ctx context.Context, r *Mesh) (*Mesh, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/service_binding.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/service_binding.go index 88790d3880..ac6d723a00 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/service_binding.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/service_binding.go @@ -151,10 +151,9 @@ func (c *Client) GetServiceBinding(ctx context.Context, r *ServiceBinding) (*Ser if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + 
result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tcp_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tcp_route.go index 352d4fc919..8babf579a1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tcp_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tcp_route.go @@ -358,10 +358,9 @@ func (c *Client) GetTcpRoute(ctx context.Context, r *TcpRoute) (*TcpRoute, error if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tls_route.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tls_route.go index 58a4fba860..b989794ec9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tls_route.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkservices/beta/tls_route.go @@ -351,10 +351,9 @@ func (c *Client) GetTlsRoute(ctx context.Context, r *TlsRoute) (*TlsRoute, error if err != nil { return nil, err } - nr := r.urlNormalized() - 
result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy/beta/policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy/beta/policy.go index 968f323d81..b86f72ef7c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy/beta/policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy/beta/policy.go @@ -376,9 +376,8 @@ func (c *Client) GetPolicy(ctx context.Context, r *Policy) (*Policy, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Parent = nr.Parent - result.Name = nr.Name + result.Parent = r.Parent + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.go index 1c82e25091..9fd7e16e43 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.go @@ -1913,9 +1913,8 @@ func (c *Client) GetGuestPolicy(ctx context.Context, r *GuestPolicy) (*GuestPoli if err != nil { return nil, err } - nr := r.urlNormalized() - 
result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.yaml index 07ac1d0af6..df31e63c34 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy.yaml @@ -493,9 +493,9 @@ components: uri: type: string x-dcl-go-name: Uri - description: 'URI from which to fetch the object. It should - contain both the protocol and path following the format: - {protocol}://{location}.' + description: URI from which to fetch the object. It should + contain both the protocol and path following the format + {protocol}://{location}. 
desiredState: type: string x-dcl-go-name: DesiredState diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_beta_yaml_embed.go index a4b0a1fdf2..181bbb533b 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/osconfig/beta/guest_policy.yaml -var YAML_guest_policy = []byte("info:\n title: OSConfig/GuestPolicy\n description: The OSConfig GuestPolicy resource\n x-dcl-struct-name: GuestPolicy\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a GuestPolicy\n parameters:\n - name: GuestPolicy\n required: true\n description: A full instance of a GuestPolicy\n apply:\n description: The function used to apply information about a GuestPolicy\n parameters:\n - name: GuestPolicy\n required: true\n description: A full instance of a GuestPolicy\n delete:\n description: The function used to delete a GuestPolicy\n parameters:\n - name: GuestPolicy\n required: true\n description: A full instance of a GuestPolicy\n deleteAll:\n description: The function used to delete all GuestPolicy\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many GuestPolicy\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n GuestPolicy:\n title: GuestPolicy\n x-dcl-id: projects/{{project}}/guestPolicies/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n 
x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n properties:\n assignment:\n type: object\n x-dcl-go-name: Assignment\n x-dcl-go-type: GuestPolicyAssignment\n description: Specifies the VMs that are assigned this policy. This allows\n you to target sets or groups of VMs by different parameters such as labels,\n names, OS, or zones. Empty assignments will target ALL VMs underneath\n this policy. Conflict Management Policies that exist higher up in the\n resource hierarchy (closer to the Org) will override those lower down\n if there is a conflict. At the same level in the resource hierarchy (ie.\n within a project), the service will prevent the creation of multiple policies\n that conflict with each other. If there are multiple policies that specify\n the same config (eg. package, software recipe, repository, etc.), the\n service will ensure that no VM could potentially receive instructions\n from both policies. To create multiple policies that specify different\n versions of a package or different configs for different Operating Systems,\n each policy must be mutually exclusive in their targeting according to\n labels, OS, or other criteria. Different configs are identified for conflicts\n in different ways. Packages are identified by their name and the package\n manager(s) they target. Package repositories are identified by their unique\n id where applicable. Some package managers don't have a unique identifier\n for repositories and where that's the case, no uniqueness is validated\n by the service. Note that if OS Inventory is disabled, a VM will not be\n assigned a policy that targets by OS because the service will see this\n VM's OS as unknown.\n properties:\n groupLabels:\n type: array\n x-dcl-go-name: GroupLabels\n description: Targets instances matching at least one of these label\n sets. 
This allows an assignment to target disparate groups, for example\n \"env=prod or env=staging\".\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyAssignmentGroupLabels\n properties:\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Google Compute Engine instance labels that must be\n present for an instance to be included in this assignment group.\n instanceNamePrefixes:\n type: array\n x-dcl-go-name: InstanceNamePrefixes\n description: Targets VM instances whose name starts with one of these\n prefixes. Like labels, this is another way to group VM instances when\n targeting configs, for example prefix=\"prod-\". Only supported for\n project-level policies.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n instances:\n type: array\n x-dcl-go-name: Instances\n description: Targets any of the instances specified. Instances are specified\n by their URI in the form `zones/[ZONE]/instances/[INSTANCE_NAME]`.\n Instance targeting is uncommon and is supported to facilitate the\n management of changes by the instance or to target specific VM instances\n for development and testing. Only supported for project-level policies\n and must reference instances within this project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n osTypes:\n type: array\n x-dcl-go-name: OSTypes\n description: Targets VM instances matching at least one of the following\n OS types. 
VM instances must match all supplied criteria for a given\n OsType to be included.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyAssignmentOSTypes\n properties:\n osArchitecture:\n type: string\n x-dcl-go-name: OSArchitecture\n description: Targets VM instances with OS Inventory enabled and\n having the following OS architecture.\n osShortName:\n type: string\n x-dcl-go-name: OSShortName\n description: Targets VM instances with OS Inventory enabled and\n having the following OS short name, for example \"debian\" or\n \"windows\".\n osVersion:\n type: string\n x-dcl-go-name: OSVersion\n description: Targets VM instances with OS Inventory enabled and\n having the following following OS version.\n zones:\n type: array\n x-dcl-go-name: Zones\n description: Targets instances in any of these zones. Leave empty to\n target instances in any zone. Zonal targeting is uncommon and is supported\n to facilitate the management of changes by zone.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time this GuestPolicy was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Description of the GuestPolicy. Length of the description is\n limited to 1024 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: The etag for this GuestPolicy. 
If this is provided on update,\n it must match the server's etag.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Unique name of the resource in this project using the form:\n `projects/{project_id}/guestPolicies/{guest_policy_id}`.'\n packageRepositories:\n type: array\n x-dcl-go-name: PackageRepositories\n description: List of package repository configurations assigned to the VM\n instance.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyPackageRepositories\n properties:\n apt:\n type: object\n x-dcl-go-name: Apt\n x-dcl-go-type: GuestPolicyPackageRepositoriesApt\n description: An Apt Repository.\n x-dcl-conflicts:\n - goo\n - yum\n - zypper\n required:\n - uri\n - distribution\n properties:\n archiveType:\n type: string\n x-dcl-go-name: ArchiveType\n x-dcl-go-type: GuestPolicyPackageRepositoriesAptArchiveTypeEnum\n description: 'Type of archive files in this repository. The default\n behavior is DEB. Possible values: ARCHIVE_TYPE_UNSPECIFIED,\n DEB, DEB_SRC'\n enum:\n - ARCHIVE_TYPE_UNSPECIFIED\n - DEB\n - DEB_SRC\n components:\n type: array\n x-dcl-go-name: Components\n description: Required. List of components for this repository.\n Must contain at least one item.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n distribution:\n type: string\n x-dcl-go-name: Distribution\n description: Required. Distribution of this repository.\n gpgKey:\n type: string\n x-dcl-go-name: GpgKey\n description: URI of the key file for this repository. The agent\n maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`\n containing all the keys in any applied guest policy.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. 
URI for this repository.\n goo:\n type: object\n x-dcl-go-name: Goo\n x-dcl-go-type: GuestPolicyPackageRepositoriesGoo\n description: A Goo Repository.\n x-dcl-conflicts:\n - apt\n - yum\n - zypper\n required:\n - name\n - url\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The name of the repository.\n url:\n type: string\n x-dcl-go-name: Url\n description: Required. The url of the repository.\n yum:\n type: object\n x-dcl-go-name: Yum\n x-dcl-go-type: GuestPolicyPackageRepositoriesYum\n description: A Yum Repository.\n x-dcl-conflicts:\n - apt\n - goo\n - zypper\n required:\n - id\n - baseUrl\n properties:\n baseUrl:\n type: string\n x-dcl-go-name: BaseUrl\n description: Required. The location of the repository directory.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The display name of the repository.\n gpgKeys:\n type: array\n x-dcl-go-name: GpgKeys\n description: URIs of GPG keys.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. A one word, unique name for this repository.\n This is the `repo id` in the Yum config file and also the `display_name`\n if `display_name` is omitted. This id is also used as the unique\n identifier when checking for guest policy conflicts.\n zypper:\n type: object\n x-dcl-go-name: Zypper\n x-dcl-go-type: GuestPolicyPackageRepositoriesZypper\n description: A Zypper Repository.\n x-dcl-conflicts:\n - apt\n - goo\n - yum\n required:\n - id\n - baseUrl\n properties:\n baseUrl:\n type: string\n x-dcl-go-name: BaseUrl\n description: Required. 
The location of the repository directory.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The display name of the repository.\n gpgKeys:\n type: array\n x-dcl-go-name: GpgKeys\n description: URIs of GPG keys.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. A one word, unique name for this repository.\n This is the `repo id` in the zypper config file and also the\n `display_name` if `display_name` is omitted. This id is also\n used as the unique identifier when checking for guest policy\n conflicts.\n packages:\n type: array\n x-dcl-go-name: Packages\n description: List of package configurations assigned to the VM instance.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyPackages\n properties:\n desiredState:\n type: string\n x-dcl-go-name: DesiredState\n x-dcl-go-type: GuestPolicyPackagesDesiredStateEnum\n description: 'The desired_state the agent should maintain for this\n package. The default is to ensure the package is installed. Possible\n values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED'\n enum:\n - DESIRED_STATE_UNSPECIFIED\n - INSTALLED\n - REMOVED\n manager:\n type: string\n x-dcl-go-name: Manager\n x-dcl-go-type: GuestPolicyPackagesManagerEnum\n description: 'Type of package manager that can be used to install\n this package. If a system does not have the package manager, the\n package is not installed or removed no error message is returned.\n By default, or if you specify `ANY`, the agent attempts to install\n and remove this package using the default package manager. This\n is useful when creating a policy that applies to different types\n of systems. The default behavior is ANY. 
Possible values: MANAGER_UNSPECIFIED,\n ANY, APT, YUM, ZYPPER, GOO'\n enum:\n - MANAGER_UNSPECIFIED\n - ANY\n - APT\n - YUM\n - ZYPPER\n - GOO\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The name of the package. A package is uniquely\n identified for conflict validation by checking the package name\n and the manager(s) that the package targets.\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n recipes:\n type: array\n x-dcl-go-name: Recipes\n description: Optional. A list of Recipes to install on the VM.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipes\n properties:\n artifacts:\n type: array\n x-dcl-go-name: Artifacts\n description: Resources available to be used in the steps in the recipe.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipesArtifacts\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false, recipes are subject\n to validations based on the artifact type: Remote: A checksum\n must be specified, and only protocols with transport-layer\n security are permitted. 
GCS: An object generation number must\n be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: GuestPolicyRecipesArtifactsGcs\n description: A Google Cloud Storage artifact.\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: 'Bucket of the Google Cloud Storage object.\n Given an example URL: `https://storage.googleapis.com/my-bucket/foo/bar#1234567`\n this value would be `my-bucket`.'\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Must be provided if allow_insecure is false.\n Generation number of the Google Cloud Storage object.\n `https://storage.googleapis.com/my-bucket/foo/bar#1234567`\n this value would be `1234567`.\n object:\n type: string\n x-dcl-go-name: Object\n description: 'Name of the Google Cloud Storage object. As\n specified [here] (https://cloud.google.com/storage/docs/naming#objectnames)\n Given an example URL: `https://storage.googleapis.com/my-bucket/foo/bar#1234567`\n this value would be `foo/bar`.'\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. Id of the artifact, which the installation\n and update steps of this recipe can reference. Artifacts in\n a recipe cannot have the same id.\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: GuestPolicyRecipesArtifactsRemote\n description: A generic remote artifact.\n properties:\n checksum:\n type: string\n x-dcl-go-name: Checksum\n description: Must be provided if `allow_insecure` is `false`.\n SHA256 checksum in hex format, to compare to the checksum\n of the artifact. If the checksum is not empty and it doesn't\n match the artifact then the recipe installation fails\n before running any of the steps.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: 'URI from which to fetch the object. 
It should\n contain both the protocol and path following the format:\n {protocol}://{location}.'\n desiredState:\n type: string\n x-dcl-go-name: DesiredState\n x-dcl-go-type: GuestPolicyRecipesDesiredStateEnum\n description: 'Default is INSTALLED. The desired state the agent should\n maintain for this recipe. INSTALLED: The software recipe is installed\n on the instance but won''t be updated to new versions. UPDATED:\n The software recipe is installed on the instance. The recipe is\n updated to a higher version, if a higher version of the recipe is\n assigned to this instance. REMOVE: Remove is unsupported for software\n recipes and attempts to create or update a recipe to the REMOVE\n state is rejected. Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED,\n REMOVED'\n enum:\n - DESIRED_STATE_UNSPECIFIED\n - INSTALLED\n - REMOVED\n installSteps:\n type: array\n x-dcl-go-name: InstallSteps\n description: Actions to be taken for installing this recipe. On failure\n it stops executing steps and does not attempt another installation.\n Any steps taken (including partially completed steps) are not rolled\n back.\n x-dcl-conflicts:\n - updateSteps\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipesInstallSteps\n properties:\n archiveExtraction:\n type: object\n x-dcl-go-name: ArchiveExtraction\n x-dcl-go-type: GuestPolicyRecipesInstallStepsArchiveExtraction\n description: Extracts an archive into the specified directory.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Directory to extract archive to. Defaults to\n `/` on Linux or `C:` on Windows.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: GuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum\n description: 'Required. 
The type of the archive to extract.\n Possible values: TYPE_UNSPECIFIED, VALIDATION, DESIRED_STATE_CHECK,\n DESIRED_STATE_ENFORCEMENT, DESIRED_STATE_CHECK_POST_ENFORCEMENT'\n enum:\n - TYPE_UNSPECIFIED\n - VALIDATION\n - DESIRED_STATE_CHECK\n - DESIRED_STATE_ENFORCEMENT\n - DESIRED_STATE_CHECK_POST_ENFORCEMENT\n dpkgInstallation:\n type: object\n x-dcl-go-name: DpkgInstallation\n x-dcl-go-type: GuestPolicyRecipesInstallStepsDpkgInstallation\n description: Installs a deb file via dpkg.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n fileCopy:\n type: object\n x-dcl-go-name: FileCopy\n x-dcl-go-type: GuestPolicyRecipesInstallStepsFileCopy\n description: Copies a file onto the instance.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Required. The absolute path on the instance\n to put the file.\n overwrite:\n type: boolean\n x-dcl-go-name: Overwrite\n description: Whether to allow this step to overwrite existing\n files. If this is false and the file already exists the\n file is not overwritten and the step is considered a success.\n Defaults to false.\n permissions:\n type: string\n x-dcl-go-name: Permissions\n description: 'Consists of three octal digits which represent,\n in order, the permissions of the owner, group, and other\n users for the file (similarly to the numeric mode used\n in the linux chmod utility). Each digit represents a three\n bit number with the 4 bit corresponding to the read permissions,\n the 2 bit corresponds to the write bit, and the one bit\n corresponds to the execute permission. Default behavior\n is 755. 
Below are some examples of permissions and their\n associated values: read, write, and execute: 7 read and\n execute: 5 read and write: 6 read only: 4'\n fileExec:\n type: object\n x-dcl-go-name: FileExec\n x-dcl-go-type: GuestPolicyRecipesInstallStepsFileExec\n description: Executes an artifact or local file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Defaults to [0]. A list of possible return\n values that the program can return to indicate a success.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n args:\n type: array\n x-dcl-go-name: Args\n description: Arguments to be passed to the provided executable.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: The id of the relevant artifact in the recipe.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: The absolute path of the file on the local\n filesystem.\n msiInstallation:\n type: object\n x-dcl-go-name: MsiInstallation\n x-dcl-go-type: GuestPolicyRecipesInstallStepsMsiInstallation\n description: Installs an MSI file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n flags:\n type: array\n x-dcl-go-name: Flags\n description: The flags to use when installing the MSI defaults\n to [\"/i\"] (i.e. 
the install flag).\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n rpmInstallation:\n type: object\n x-dcl-go-name: RpmInstallation\n x-dcl-go-type: GuestPolicyRecipesInstallStepsRpmInstallation\n description: Installs an rpm file via the rpm utility.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n scriptRun:\n type: object\n x-dcl-go-name: ScriptRun\n x-dcl-go-type: GuestPolicyRecipesInstallStepsScriptRun\n description: Runs commands in a shell.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n interpreter:\n type: string\n x-dcl-go-name: Interpreter\n x-dcl-go-type: GuestPolicyRecipesInstallStepsScriptRunInterpreterEnum\n description: 'The script interpreter to use to run the script.\n If no interpreter is specified the script is executed\n directly, which likely only succeed for scripts with [shebang\n lines](https://en.wikipedia.org/wiki/Shebang_(Unix)).\n Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL,\n POWERSHELL'\n enum:\n - INTERPRETER_UNSPECIFIED\n - NONE\n - SHELL\n - POWERSHELL\n script:\n type: string\n x-dcl-go-name: Script\n description: Required. The shell script to be executed.\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Unique identifier for the recipe. Only one\n recipe with a given name is installed on an instance. Names are\n also used to identify resources which helps to determine whether\n guest policies have conflicts. 
This means that requests to create\n multiple recipes with the same name and version are rejected since\n they could potentially have conflicting assignments.\n updateSteps:\n type: array\n x-dcl-go-name: UpdateSteps\n description: Actions to be taken for updating this recipe. On failure\n it stops executing steps and does not attempt another update for\n this recipe. Any steps taken (including partially completed steps)\n are not rolled back.\n x-dcl-conflicts:\n - installSteps\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipesUpdateSteps\n properties:\n archiveExtraction:\n type: object\n x-dcl-go-name: ArchiveExtraction\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsArchiveExtraction\n description: Extracts an archive into the specified directory.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Directory to extract archive to. Defaults to\n `/` on Linux or `C:` on Windows.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum\n description: 'Required. The type of the archive to extract.\n Possible values: TYPE_UNSPECIFIED, VALIDATION, DESIRED_STATE_CHECK,\n DESIRED_STATE_ENFORCEMENT, DESIRED_STATE_CHECK_POST_ENFORCEMENT'\n enum:\n - TYPE_UNSPECIFIED\n - VALIDATION\n - DESIRED_STATE_CHECK\n - DESIRED_STATE_ENFORCEMENT\n - DESIRED_STATE_CHECK_POST_ENFORCEMENT\n dpkgInstallation:\n type: object\n x-dcl-go-name: DpkgInstallation\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsDpkgInstallation\n description: Installs a deb file via dpkg.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. 
The id of the relevant artifact in\n the recipe.\n fileCopy:\n type: object\n x-dcl-go-name: FileCopy\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsFileCopy\n description: Copies a file onto the instance.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Required. The absolute path on the instance\n to put the file.\n overwrite:\n type: boolean\n x-dcl-go-name: Overwrite\n description: Whether to allow this step to overwrite existing\n files. If this is false and the file already exists the\n file is not overwritten and the step is considered a success.\n Defaults to false.\n permissions:\n type: string\n x-dcl-go-name: Permissions\n description: 'Consists of three octal digits which represent,\n in order, the permissions of the owner, group, and other\n users for the file (similarly to the numeric mode used\n in the linux chmod utility). Each digit represents a three\n bit number with the 4 bit corresponding to the read permissions,\n the 2 bit corresponds to the write bit, and the one bit\n corresponds to the execute permission. Default behavior\n is 755. Below are some examples of permissions and their\n associated values: read, write, and execute: 7 read and\n execute: 5 read and write: 6 read only: 4'\n fileExec:\n type: object\n x-dcl-go-name: FileExec\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsFileExec\n description: Executes an artifact or local file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Defaults to [0]. 
A list of possible return\n values that the program can return to indicate a success.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n args:\n type: array\n x-dcl-go-name: Args\n description: Arguments to be passed to the provided executable.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: The id of the relevant artifact in the recipe.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: The absolute path of the file on the local\n filesystem.\n msiInstallation:\n type: object\n x-dcl-go-name: MsiInstallation\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsMsiInstallation\n description: Installs an MSI file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n flags:\n type: array\n x-dcl-go-name: Flags\n description: The flags to use when installing the MSI defaults\n to [\"/i\"] (i.e. the install flag).\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n rpmInstallation:\n type: object\n x-dcl-go-name: RpmInstallation\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsRpmInstallation\n description: Installs an rpm file via the rpm utility.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. 
The id of the relevant artifact in\n the recipe.\n scriptRun:\n type: object\n x-dcl-go-name: ScriptRun\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsScriptRun\n description: Runs commands in a shell.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n interpreter:\n type: string\n x-dcl-go-name: Interpreter\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum\n description: 'The script interpreter to use to run the script.\n If no interpreter is specified the script is executed\n directly, which likely only succeed for scripts with [shebang\n lines](https://en.wikipedia.org/wiki/Shebang_(Unix)).\n Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL,\n POWERSHELL'\n enum:\n - INTERPRETER_UNSPECIFIED\n - NONE\n - SHELL\n - POWERSHELL\n script:\n type: string\n x-dcl-go-name: Script\n description: Required. The shell script to be executed.\n version:\n type: string\n x-dcl-go-name: Version\n description: The version of this software recipe. Version can be up\n to 4 period separated numbers (e.g. 12.34.56.78).\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Last time this GuestPolicy was updated.\n x-kubernetes-immutable: true\n") +var YAML_guest_policy = []byte("info:\n title: OSConfig/GuestPolicy\n description: The OSConfig GuestPolicy resource\n x-dcl-struct-name: GuestPolicy\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a GuestPolicy\n parameters:\n - name: GuestPolicy\n required: true\n description: A full instance of a GuestPolicy\n apply:\n description: The function used to apply information about a GuestPolicy\n parameters:\n - name: GuestPolicy\n required: true\n description: A full instance of a GuestPolicy\n delete:\n description: The function used to delete a GuestPolicy\n parameters:\n - name: GuestPolicy\n required: true\n description: A full instance of a GuestPolicy\n deleteAll:\n description: The function used to delete all GuestPolicy\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many GuestPolicy\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n GuestPolicy:\n title: GuestPolicy\n x-dcl-id: projects/{{project}}/guestPolicies/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n properties:\n assignment:\n type: object\n x-dcl-go-name: Assignment\n x-dcl-go-type: GuestPolicyAssignment\n description: Specifies the VMs that are assigned this policy. This allows\n you to target sets or groups of VMs by different parameters such as labels,\n names, OS, or zones. Empty assignments will target ALL VMs underneath\n this policy. Conflict Management Policies that exist higher up in the\n resource hierarchy (closer to the Org) will override those lower down\n if there is a conflict. 
At the same level in the resource hierarchy (ie.\n within a project), the service will prevent the creation of multiple policies\n that conflict with each other. If there are multiple policies that specify\n the same config (eg. package, software recipe, repository, etc.), the\n service will ensure that no VM could potentially receive instructions\n from both policies. To create multiple policies that specify different\n versions of a package or different configs for different Operating Systems,\n each policy must be mutually exclusive in their targeting according to\n labels, OS, or other criteria. Different configs are identified for conflicts\n in different ways. Packages are identified by their name and the package\n manager(s) they target. Package repositories are identified by their unique\n id where applicable. Some package managers don't have a unique identifier\n for repositories and where that's the case, no uniqueness is validated\n by the service. Note that if OS Inventory is disabled, a VM will not be\n assigned a policy that targets by OS because the service will see this\n VM's OS as unknown.\n properties:\n groupLabels:\n type: array\n x-dcl-go-name: GroupLabels\n description: Targets instances matching at least one of these label\n sets. This allows an assignment to target disparate groups, for example\n \"env=prod or env=staging\".\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyAssignmentGroupLabels\n properties:\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Google Compute Engine instance labels that must be\n present for an instance to be included in this assignment group.\n instanceNamePrefixes:\n type: array\n x-dcl-go-name: InstanceNamePrefixes\n description: Targets VM instances whose name starts with one of these\n prefixes. 
Like labels, this is another way to group VM instances when\n targeting configs, for example prefix=\"prod-\". Only supported for\n project-level policies.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n instances:\n type: array\n x-dcl-go-name: Instances\n description: Targets any of the instances specified. Instances are specified\n by their URI in the form `zones/[ZONE]/instances/[INSTANCE_NAME]`.\n Instance targeting is uncommon and is supported to facilitate the\n management of changes by the instance or to target specific VM instances\n for development and testing. Only supported for project-level policies\n and must reference instances within this project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Instance\n field: selfLink\n osTypes:\n type: array\n x-dcl-go-name: OSTypes\n description: Targets VM instances matching at least one of the following\n OS types. VM instances must match all supplied criteria for a given\n OsType to be included.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyAssignmentOSTypes\n properties:\n osArchitecture:\n type: string\n x-dcl-go-name: OSArchitecture\n description: Targets VM instances with OS Inventory enabled and\n having the following OS architecture.\n osShortName:\n type: string\n x-dcl-go-name: OSShortName\n description: Targets VM instances with OS Inventory enabled and\n having the following OS short name, for example \"debian\" or\n \"windows\".\n osVersion:\n type: string\n x-dcl-go-name: OSVersion\n description: Targets VM instances with OS Inventory enabled and\n having the following following OS version.\n zones:\n type: array\n x-dcl-go-name: Zones\n description: Targets instances in any of these zones. Leave empty to\n target instances in any zone. 
Zonal targeting is uncommon and is supported\n to facilitate the management of changes by zone.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time this GuestPolicy was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Description of the GuestPolicy. Length of the description is\n limited to 1024 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: The etag for this GuestPolicy. If this is provided on update,\n it must match the server's etag.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Unique name of the resource in this project using the form:\n `projects/{project_id}/guestPolicies/{guest_policy_id}`.'\n packageRepositories:\n type: array\n x-dcl-go-name: PackageRepositories\n description: List of package repository configurations assigned to the VM\n instance.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyPackageRepositories\n properties:\n apt:\n type: object\n x-dcl-go-name: Apt\n x-dcl-go-type: GuestPolicyPackageRepositoriesApt\n description: An Apt Repository.\n x-dcl-conflicts:\n - goo\n - yum\n - zypper\n required:\n - uri\n - distribution\n properties:\n archiveType:\n type: string\n x-dcl-go-name: ArchiveType\n x-dcl-go-type: GuestPolicyPackageRepositoriesAptArchiveTypeEnum\n description: 'Type of archive files in this repository. The default\n behavior is DEB. Possible values: ARCHIVE_TYPE_UNSPECIFIED,\n DEB, DEB_SRC'\n enum:\n - ARCHIVE_TYPE_UNSPECIFIED\n - DEB\n - DEB_SRC\n components:\n type: array\n x-dcl-go-name: Components\n description: Required. 
List of components for this repository.\n Must contain at least one item.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n distribution:\n type: string\n x-dcl-go-name: Distribution\n description: Required. Distribution of this repository.\n gpgKey:\n type: string\n x-dcl-go-name: GpgKey\n description: URI of the key file for this repository. The agent\n maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`\n containing all the keys in any applied guest policy.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI for this repository.\n goo:\n type: object\n x-dcl-go-name: Goo\n x-dcl-go-type: GuestPolicyPackageRepositoriesGoo\n description: A Goo Repository.\n x-dcl-conflicts:\n - apt\n - yum\n - zypper\n required:\n - name\n - url\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The name of the repository.\n url:\n type: string\n x-dcl-go-name: Url\n description: Required. The url of the repository.\n yum:\n type: object\n x-dcl-go-name: Yum\n x-dcl-go-type: GuestPolicyPackageRepositoriesYum\n description: A Yum Repository.\n x-dcl-conflicts:\n - apt\n - goo\n - zypper\n required:\n - id\n - baseUrl\n properties:\n baseUrl:\n type: string\n x-dcl-go-name: BaseUrl\n description: Required. The location of the repository directory.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The display name of the repository.\n gpgKeys:\n type: array\n x-dcl-go-name: GpgKeys\n description: URIs of GPG keys.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. A one word, unique name for this repository.\n This is the `repo id` in the Yum config file and also the `display_name`\n if `display_name` is omitted. 
This id is also used as the unique\n identifier when checking for guest policy conflicts.\n zypper:\n type: object\n x-dcl-go-name: Zypper\n x-dcl-go-type: GuestPolicyPackageRepositoriesZypper\n description: A Zypper Repository.\n x-dcl-conflicts:\n - apt\n - goo\n - yum\n required:\n - id\n - baseUrl\n properties:\n baseUrl:\n type: string\n x-dcl-go-name: BaseUrl\n description: Required. The location of the repository directory.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The display name of the repository.\n gpgKeys:\n type: array\n x-dcl-go-name: GpgKeys\n description: URIs of GPG keys.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. A one word, unique name for this repository.\n This is the `repo id` in the zypper config file and also the\n `display_name` if `display_name` is omitted. This id is also\n used as the unique identifier when checking for guest policy\n conflicts.\n packages:\n type: array\n x-dcl-go-name: Packages\n description: List of package configurations assigned to the VM instance.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyPackages\n properties:\n desiredState:\n type: string\n x-dcl-go-name: DesiredState\n x-dcl-go-type: GuestPolicyPackagesDesiredStateEnum\n description: 'The desired_state the agent should maintain for this\n package. The default is to ensure the package is installed. Possible\n values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED'\n enum:\n - DESIRED_STATE_UNSPECIFIED\n - INSTALLED\n - REMOVED\n manager:\n type: string\n x-dcl-go-name: Manager\n x-dcl-go-type: GuestPolicyPackagesManagerEnum\n description: 'Type of package manager that can be used to install\n this package. 
If a system does not have the package manager, the\n package is not installed or removed no error message is returned.\n By default, or if you specify `ANY`, the agent attempts to install\n and remove this package using the default package manager. This\n is useful when creating a policy that applies to different types\n of systems. The default behavior is ANY. Possible values: MANAGER_UNSPECIFIED,\n ANY, APT, YUM, ZYPPER, GOO'\n enum:\n - MANAGER_UNSPECIFIED\n - ANY\n - APT\n - YUM\n - ZYPPER\n - GOO\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The name of the package. A package is uniquely\n identified for conflict validation by checking the package name\n and the manager(s) that the package targets.\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n recipes:\n type: array\n x-dcl-go-name: Recipes\n description: Optional. A list of Recipes to install on the VM.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipes\n properties:\n artifacts:\n type: array\n x-dcl-go-name: Artifacts\n description: Resources available to be used in the steps in the recipe.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipesArtifacts\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false, recipes are subject\n to validations based on the artifact type: Remote: A checksum\n must be specified, and only protocols with transport-layer\n security are permitted. 
GCS: An object generation number must\n be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: GuestPolicyRecipesArtifactsGcs\n description: A Google Cloud Storage artifact.\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: 'Bucket of the Google Cloud Storage object.\n Given an example URL: `https://storage.googleapis.com/my-bucket/foo/bar#1234567`\n this value would be `my-bucket`.'\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Must be provided if allow_insecure is false.\n Generation number of the Google Cloud Storage object.\n `https://storage.googleapis.com/my-bucket/foo/bar#1234567`\n this value would be `1234567`.\n object:\n type: string\n x-dcl-go-name: Object\n description: 'Name of the Google Cloud Storage object. As\n specified [here] (https://cloud.google.com/storage/docs/naming#objectnames)\n Given an example URL: `https://storage.googleapis.com/my-bucket/foo/bar#1234567`\n this value would be `foo/bar`.'\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. Id of the artifact, which the installation\n and update steps of this recipe can reference. Artifacts in\n a recipe cannot have the same id.\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: GuestPolicyRecipesArtifactsRemote\n description: A generic remote artifact.\n properties:\n checksum:\n type: string\n x-dcl-go-name: Checksum\n description: Must be provided if `allow_insecure` is `false`.\n SHA256 checksum in hex format, to compare to the checksum\n of the artifact. If the checksum is not empty and it doesn't\n match the artifact then the recipe installation fails\n before running any of the steps.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: URI from which to fetch the object. 
It should\n contain both the protocol and path following the format\n {protocol}://{location}.\n desiredState:\n type: string\n x-dcl-go-name: DesiredState\n x-dcl-go-type: GuestPolicyRecipesDesiredStateEnum\n description: 'Default is INSTALLED. The desired state the agent should\n maintain for this recipe. INSTALLED: The software recipe is installed\n on the instance but won''t be updated to new versions. UPDATED:\n The software recipe is installed on the instance. The recipe is\n updated to a higher version, if a higher version of the recipe is\n assigned to this instance. REMOVE: Remove is unsupported for software\n recipes and attempts to create or update a recipe to the REMOVE\n state is rejected. Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED,\n REMOVED'\n enum:\n - DESIRED_STATE_UNSPECIFIED\n - INSTALLED\n - REMOVED\n installSteps:\n type: array\n x-dcl-go-name: InstallSteps\n description: Actions to be taken for installing this recipe. On failure\n it stops executing steps and does not attempt another installation.\n Any steps taken (including partially completed steps) are not rolled\n back.\n x-dcl-conflicts:\n - updateSteps\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipesInstallSteps\n properties:\n archiveExtraction:\n type: object\n x-dcl-go-name: ArchiveExtraction\n x-dcl-go-type: GuestPolicyRecipesInstallStepsArchiveExtraction\n description: Extracts an archive into the specified directory.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Directory to extract archive to. Defaults to\n `/` on Linux or `C:` on Windows.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: GuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum\n description: 'Required. 
The type of the archive to extract.\n Possible values: TYPE_UNSPECIFIED, VALIDATION, DESIRED_STATE_CHECK,\n DESIRED_STATE_ENFORCEMENT, DESIRED_STATE_CHECK_POST_ENFORCEMENT'\n enum:\n - TYPE_UNSPECIFIED\n - VALIDATION\n - DESIRED_STATE_CHECK\n - DESIRED_STATE_ENFORCEMENT\n - DESIRED_STATE_CHECK_POST_ENFORCEMENT\n dpkgInstallation:\n type: object\n x-dcl-go-name: DpkgInstallation\n x-dcl-go-type: GuestPolicyRecipesInstallStepsDpkgInstallation\n description: Installs a deb file via dpkg.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n fileCopy:\n type: object\n x-dcl-go-name: FileCopy\n x-dcl-go-type: GuestPolicyRecipesInstallStepsFileCopy\n description: Copies a file onto the instance.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Required. The absolute path on the instance\n to put the file.\n overwrite:\n type: boolean\n x-dcl-go-name: Overwrite\n description: Whether to allow this step to overwrite existing\n files. If this is false and the file already exists the\n file is not overwritten and the step is considered a success.\n Defaults to false.\n permissions:\n type: string\n x-dcl-go-name: Permissions\n description: 'Consists of three octal digits which represent,\n in order, the permissions of the owner, group, and other\n users for the file (similarly to the numeric mode used\n in the linux chmod utility). Each digit represents a three\n bit number with the 4 bit corresponding to the read permissions,\n the 2 bit corresponds to the write bit, and the one bit\n corresponds to the execute permission. Default behavior\n is 755. 
Below are some examples of permissions and their\n associated values: read, write, and execute: 7 read and\n execute: 5 read and write: 6 read only: 4'\n fileExec:\n type: object\n x-dcl-go-name: FileExec\n x-dcl-go-type: GuestPolicyRecipesInstallStepsFileExec\n description: Executes an artifact or local file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Defaults to [0]. A list of possible return\n values that the program can return to indicate a success.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n args:\n type: array\n x-dcl-go-name: Args\n description: Arguments to be passed to the provided executable.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: The id of the relevant artifact in the recipe.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: The absolute path of the file on the local\n filesystem.\n msiInstallation:\n type: object\n x-dcl-go-name: MsiInstallation\n x-dcl-go-type: GuestPolicyRecipesInstallStepsMsiInstallation\n description: Installs an MSI file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n flags:\n type: array\n x-dcl-go-name: Flags\n description: The flags to use when installing the MSI defaults\n to [\"/i\"] (i.e. 
the install flag).\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n rpmInstallation:\n type: object\n x-dcl-go-name: RpmInstallation\n x-dcl-go-type: GuestPolicyRecipesInstallStepsRpmInstallation\n description: Installs an rpm file via the rpm utility.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n scriptRun:\n type: object\n x-dcl-go-name: ScriptRun\n x-dcl-go-type: GuestPolicyRecipesInstallStepsScriptRun\n description: Runs commands in a shell.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n interpreter:\n type: string\n x-dcl-go-name: Interpreter\n x-dcl-go-type: GuestPolicyRecipesInstallStepsScriptRunInterpreterEnum\n description: 'The script interpreter to use to run the script.\n If no interpreter is specified the script is executed\n directly, which likely only succeed for scripts with [shebang\n lines](https://en.wikipedia.org/wiki/Shebang_(Unix)).\n Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL,\n POWERSHELL'\n enum:\n - INTERPRETER_UNSPECIFIED\n - NONE\n - SHELL\n - POWERSHELL\n script:\n type: string\n x-dcl-go-name: Script\n description: Required. The shell script to be executed.\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Unique identifier for the recipe. Only one\n recipe with a given name is installed on an instance. Names are\n also used to identify resources which helps to determine whether\n guest policies have conflicts. 
This means that requests to create\n multiple recipes with the same name and version are rejected since\n they could potentially have conflicting assignments.\n updateSteps:\n type: array\n x-dcl-go-name: UpdateSteps\n description: Actions to be taken for updating this recipe. On failure\n it stops executing steps and does not attempt another update for\n this recipe. Any steps taken (including partially completed steps)\n are not rolled back.\n x-dcl-conflicts:\n - installSteps\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: GuestPolicyRecipesUpdateSteps\n properties:\n archiveExtraction:\n type: object\n x-dcl-go-name: ArchiveExtraction\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsArchiveExtraction\n description: Extracts an archive into the specified directory.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Directory to extract archive to. Defaults to\n `/` on Linux or `C:` on Windows.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum\n description: 'Required. The type of the archive to extract.\n Possible values: TYPE_UNSPECIFIED, VALIDATION, DESIRED_STATE_CHECK,\n DESIRED_STATE_ENFORCEMENT, DESIRED_STATE_CHECK_POST_ENFORCEMENT'\n enum:\n - TYPE_UNSPECIFIED\n - VALIDATION\n - DESIRED_STATE_CHECK\n - DESIRED_STATE_ENFORCEMENT\n - DESIRED_STATE_CHECK_POST_ENFORCEMENT\n dpkgInstallation:\n type: object\n x-dcl-go-name: DpkgInstallation\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsDpkgInstallation\n description: Installs a deb file via dpkg.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. 
The id of the relevant artifact in\n the recipe.\n fileCopy:\n type: object\n x-dcl-go-name: FileCopy\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsFileCopy\n description: Copies a file onto the instance.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n destination:\n type: string\n x-dcl-go-name: Destination\n description: Required. The absolute path on the instance\n to put the file.\n overwrite:\n type: boolean\n x-dcl-go-name: Overwrite\n description: Whether to allow this step to overwrite existing\n files. If this is false and the file already exists the\n file is not overwritten and the step is considered a success.\n Defaults to false.\n permissions:\n type: string\n x-dcl-go-name: Permissions\n description: 'Consists of three octal digits which represent,\n in order, the permissions of the owner, group, and other\n users for the file (similarly to the numeric mode used\n in the linux chmod utility). Each digit represents a three\n bit number with the 4 bit corresponding to the read permissions,\n the 2 bit corresponds to the write bit, and the one bit\n corresponds to the execute permission. Default behavior\n is 755. Below are some examples of permissions and their\n associated values: read, write, and execute: 7 read and\n execute: 5 read and write: 6 read only: 4'\n fileExec:\n type: object\n x-dcl-go-name: FileExec\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsFileExec\n description: Executes an artifact or local file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Defaults to [0]. 
A list of possible return\n values that the program can return to indicate a success.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n args:\n type: array\n x-dcl-go-name: Args\n description: Arguments to be passed to the provided executable.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: The id of the relevant artifact in the recipe.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: The absolute path of the file on the local\n filesystem.\n msiInstallation:\n type: object\n x-dcl-go-name: MsiInstallation\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsMsiInstallation\n description: Installs an MSI file.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. The id of the relevant artifact in\n the recipe.\n flags:\n type: array\n x-dcl-go-name: Flags\n description: The flags to use when installing the MSI defaults\n to [\"/i\"] (i.e. the install flag).\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n rpmInstallation:\n type: object\n x-dcl-go-name: RpmInstallation\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsRpmInstallation\n description: Installs an rpm file via the rpm utility.\n properties:\n artifactId:\n type: string\n x-dcl-go-name: ArtifactId\n description: Required. 
The id of the relevant artifact in\n the recipe.\n scriptRun:\n type: object\n x-dcl-go-name: ScriptRun\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsScriptRun\n description: Runs commands in a shell.\n properties:\n allowedExitCodes:\n type: array\n x-dcl-go-name: AllowedExitCodes\n description: Return codes that indicate that the software\n installed or updated successfully. Behaviour defaults\n to [0]\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n interpreter:\n type: string\n x-dcl-go-name: Interpreter\n x-dcl-go-type: GuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum\n description: 'The script interpreter to use to run the script.\n If no interpreter is specified the script is executed\n directly, which likely only succeed for scripts with [shebang\n lines](https://en.wikipedia.org/wiki/Shebang_(Unix)).\n Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL,\n POWERSHELL'\n enum:\n - INTERPRETER_UNSPECIFIED\n - NONE\n - SHELL\n - POWERSHELL\n script:\n type: string\n x-dcl-go-name: Script\n description: Required. The shell script to be executed.\n version:\n type: string\n x-dcl-go-name: Version\n description: The version of this software recipe. Version can be up\n to 4 period separated numbers (e.g. 12.34.56.78).\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Last time this GuestPolicy was updated.\n x-kubernetes-immutable: true\n") -// 45616 bytes -// MD5: c87bbe6142ee24ff79fc1932da5c0216 +// 45613 bytes +// MD5: 1fdc2d058c7650450cc44a2e73362013 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_schema.go index 48882d3ab7..9147befa33 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/guest_policy_schema.go @@ -525,7 +525,7 @@ func DCLGuestPolicySchema() *dcl.Schema { "uri": &dcl.Property{ Type: "string", GoName: "Uri", - Description: "URI from which to fetch the object. It should contain both the protocol and path following the format: {protocol}://{location}.", + Description: "URI from which to fetch the object. 
It should contain both the protocol and path following the format {protocol}://{location}.", }, }, }, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/os_policy_assignment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/os_policy_assignment.go index 09567aa53c..9a9ce955a9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/os_policy_assignment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/os_policy_assignment.go @@ -2693,10 +2693,9 @@ func (c *Client) GetOSPolicyAssignment(ctx context.Context, r *OSPolicyAssignmen if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/patch_deployment.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/patch_deployment.go index 87dce6902f..d56b02ff53 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/patch_deployment.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/beta/patch_deployment.go @@ -1879,9 +1879,8 @@ func (c *Client) GetPatchDeployment(ctx context.Context, r *PatchDeployment) (*P if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = 
r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/ca_pool.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/ca_pool.go index 19318acba2..f528e8195f 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/ca_pool.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/ca_pool.go @@ -1217,10 +1217,9 @@ func (c *Client) GetCaPool(ctx context.Context, r *CaPool) (*CaPool, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate.go index 4831877a6c..1036feefae 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate.go @@ -2107,11 +2107,10 @@ func (c *Client) GetCertificate(ctx context.Context, r *Certificate) (*Certifica if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.CaPool = nr.CaPool - 
result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.CaPool = r.CaPool + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_authority.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_authority.go index 51f8439933..a976212b68 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_authority.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_authority.go @@ -2404,11 +2404,10 @@ func (c *Client) GetCertificateAuthority(ctx context.Context, r *CertificateAuth if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.CaPool = nr.CaPool - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.CaPool = r.CaPool + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_template.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_template.go index f25abf1403..23671b7368 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_template.go +++ 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/beta/certificate_template.go @@ -866,10 +866,9 @@ func (c *Client) GetCertificateTemplate(ctx context.Context, r *CertificateTempl if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go index 8b6c3b7b6d..b208067014 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go @@ -1217,10 +1217,9 @@ func (c *Client) GetCaPool(ctx context.Context, r *CaPool) (*CaPool, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate.go index f6a747359a..900ec6a6ad 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate.go @@ -2107,11 +2107,10 @@ func (c *Client) GetCertificate(ctx context.Context, r *Certificate) (*Certifica if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.CaPool = nr.CaPool - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.CaPool = r.CaPool + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go index 754464b47a..0e098eff59 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go @@ -2404,11 +2404,10 @@ func (c *Client) GetCertificateAuthority(ctx context.Context, r *CertificateAuth if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.CaPool = nr.CaPool - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.CaPool = r.CaPool + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_template.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_template.go index b08b616451..2d14719b74 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_template.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_template.go @@ -866,10 +866,9 @@ func (c *Client) GetCertificateTemplate(ctx context.Context, r *CertificateTempl if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/subscription.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/subscription.yaml index c374658d97..a8acb181e9 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/subscription.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/subscription.yaml @@ -196,7 +196,7 @@ components: description: |- Endpoint configuration attributes that can be used to control different aspects of the message delivery. The only currently supported attribute is `x-goog-version`, which you can use to change the format of the pushed message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the pushed message (i.e., its fields and metadata). 
If not present during the `CreateSubscription` call, it will default to the version of the Pub/Sub API used to make such call. If not present in a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. The only supported values for the `x-goog-version` attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. For example: - attributes: { "x-goog-version": "v1" } + attributes { "x-goog-version": "v1" } x-kubernetes-immutable: true default: '{"x-goog-version":"v1"}' oidcToken: diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/topic.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/topic.go index 351ff1fba0..6d296c5595 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/topic.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsub/topic.go @@ -192,9 +192,8 @@ func (c *Client) GetTopic(ctx context.Context, r *Topic) (*Topic, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key.go index 3eef97bc09..bb157fb937 100644 --- 
a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key.go @@ -440,9 +440,8 @@ func (c *Client) GetKey(ctx context.Context, r *Key) (*Key, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key_internal.go index b80e3257af..a5ac11aa91 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta/key_internal.go @@ -343,11 +343,8 @@ func (op *createKeyOperation) do(ctx context.Context, r *Key, c *Client) error { op.response = o // Include Name in URL substitution for initial GET request. 
- name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetKey(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key.go index 3b5178885e..f340f77e3e 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key.go @@ -440,9 +440,8 @@ func (c *Client) GetKey(ctx context.Context, r *Key) (*Key, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key_internal.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key_internal.go index 2ed3df407b..f7a731958c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key_internal.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/key_internal.go @@ -343,11 +343,8 @@ func (op *createKeyOperation) do(ctx context.Context, r *Key, c *Client) error { 
op.response = o // Include Name in URL substitution for initial GET request. - name, ok := op.response["name"].(string) - if !ok { - return fmt.Errorf("expected name to be a string in %v, was %T", op.response, op.response["name"]) - } - r.Name = &name + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) if _, err := c.GetKey(ctx, r); err != nil { c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.go index b249eb2e16..c6b6989482 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.go @@ -1619,10 +1619,9 @@ func (c *Client) GetJob(ctx context.Context, r *Job) (*Job, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.yaml index f8e5fd05d2..c3ba5a49a6 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job.yaml @@ -552,10 +552,10 @@ components: secret: type: string x-dcl-go-name: Secret 
- description: 'Required. The name of the secret - in Cloud Secret Manager. Format: {secret_name} + description: Required. The name of the secret + in Cloud Secret Manager. Format {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} - if the secret is in a different project.' + if the secret is in a different project. x-dcl-references: - resource: Secretmanager/Secret field: selfLink @@ -724,11 +724,11 @@ components: instances: type: array x-dcl-go-name: Instances - description: 'The Cloud SQL instance connection names, + description: The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and - Cloud Run. Format: {project}:{location}:{instance}' + Cloud Run. Format {project}:{location}:{instance} x-dcl-send-empty: true x-dcl-list-type: list items: @@ -821,10 +821,10 @@ components: secret: type: string x-dcl-go-name: Secret - description: 'Required. The name of the secret in Cloud - Secret Manager. Format: {secret} if the secret is in + description: Required. The name of the secret in Cloud + Secret Manager. Format {secret} if the secret is in the same project. projects/{project}/secrets/{secret} - if the secret is in a different project.' + if the secret is in a different project. 
vpcAccess: type: object x-dcl-go-name: VPCAccess diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_alpha_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_alpha_yaml_embed.go index d88cfa4e69..a726b683d1 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_alpha_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_alpha_yaml_embed.go @@ -17,7 +17,7 @@ package alpha // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/run/alpha/job.yaml -var YAML_job = []byte("info:\n title: Run/Job\n description: The Run Job resource\n x-dcl-struct-name: Job\n x-dcl-has-iam: true\npaths:\n get:\n description: The function used to get information about a Job\n parameters:\n - name: Job\n required: true\n description: A full instance of a Job\n apply:\n description: The function used to apply information about a Job\n parameters:\n - name: Job\n required: true\n description: A full instance of a Job\n delete:\n description: The function used to delete a Job\n parameters:\n - name: Job\n required: true\n description: A full instance of a Job\n deleteAll:\n description: The function used to delete all Job\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Job\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Job:\n title: Job\n x-dcl-id: projects/{{project}}/locations/{{location}}/jobs/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: true\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n 
x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - template\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'KRM-style annotations for the resource. Unstructured key value\n map that may be set by external tools to store and arbitrary metadata.\n They are not queryable and should be preserved when modifying objects.\n Cloud Run will populate some annotations using ''run.googleapis.com''\n or ''serving.knative.dev'' namespaces. This field follows Kubernetes annotations''\n namespacing, limits, and rules. More info: https://kubernetes.io/docs/user-guide/annotations'\n binaryAuthorization:\n type: object\n x-dcl-go-name: BinaryAuthorization\n x-dcl-go-type: JobBinaryAuthorization\n description: Settings for the Binary Authorization feature.\n properties:\n breakglassJustification:\n type: string\n x-dcl-go-name: BreakglassJustification\n description: If present, indicates to use Breakglass using this justification.\n If use_default is False, then it must be empty. For more information\n on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass\n useDefault:\n type: boolean\n x-dcl-go-name: UseDefault\n description: If True, indicates to use the default project's binary\n authorization policy. If False, binary authorization will be disabled.\n client:\n type: string\n x-dcl-go-name: Client\n description: Arbitrary identifier for the API client.\n clientVersion:\n type: string\n x-dcl-go-name: ClientVersion\n description: Arbitrary version identifier for the API client.\n conditions:\n type: array\n x-dcl-go-name: Conditions\n readOnly: true\n description: Output only. The Conditions of all other associated sub-resources.\n They contain additional diagnostics information in case the Job does not\n reach its desired state. 
See comments in `reconciling` for additional\n information on reconciliation process in Cloud Run.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobConditions\n properties:\n executionReason:\n type: string\n x-dcl-go-name: ExecutionReason\n x-dcl-go-type: JobConditionsExecutionReasonEnum\n description: 'A reason for the execution condition. Possible values:\n EXECUTION_REASON_UNDEFINED, JOB_STATUS_SERVICE_POLLING_ERROR, NON_ZERO_EXIT_CODE'\n x-dcl-conflicts:\n - reason\n - revisionReason\n enum:\n - EXECUTION_REASON_UNDEFINED\n - JOB_STATUS_SERVICE_POLLING_ERROR\n - NON_ZERO_EXIT_CODE\n lastTransitionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastTransitionTime\n description: Last time the condition transitioned from one status\n to another.\n message:\n type: string\n x-dcl-go-name: Message\n description: Human readable message indicating details about the current\n status.\n reason:\n type: string\n x-dcl-go-name: Reason\n x-dcl-go-type: JobConditionsReasonEnum\n description: 'A common (service-level) reason for this condition.\n Possible values: COMMON_REASON_UNDEFINED, UNKNOWN, REVISION_FAILED,\n PROGRESS_DEADLINE_EXCEEDED, BUILD_STEP_FAILED, CONTAINER_MISSING,\n CONTAINER_PERMISSION_DENIED, CONTAINER_IMAGE_UNAUTHORIZED, CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED,\n ENCRYPTION_KEY_PERMISSION_DENIED, ENCRYPTION_KEY_CHECK_FAILED, SECRETS_ACCESS_CHECK_FAILED,\n WAITING_FOR_OPERATION, IMMEDIATE_RETRY, POSTPONED_RETRY'\n x-dcl-conflicts:\n - revisionReason\n - executionReason\n enum:\n - COMMON_REASON_UNDEFINED\n - UNKNOWN\n - REVISION_FAILED\n - PROGRESS_DEADLINE_EXCEEDED\n - BUILD_STEP_FAILED\n - CONTAINER_MISSING\n - CONTAINER_PERMISSION_DENIED\n - CONTAINER_IMAGE_UNAUTHORIZED\n - CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED\n - ENCRYPTION_KEY_PERMISSION_DENIED\n - ENCRYPTION_KEY_CHECK_FAILED\n - SECRETS_ACCESS_CHECK_FAILED\n - WAITING_FOR_OPERATION\n - IMMEDIATE_RETRY\n - POSTPONED_RETRY\n 
revisionReason:\n type: string\n x-dcl-go-name: RevisionReason\n x-dcl-go-type: JobConditionsRevisionReasonEnum\n description: 'A reason for the revision condition. Possible values:\n REVISION_REASON_UNDEFINED, PENDING, RESERVE, RETIRED, RETIRING,\n RECREATING, HEALTH_CHECK_CONTAINER_ERROR, CUSTOMIZED_PATH_RESPONSE_PENDING,\n MIN_INSTANCES_NOT_PROVISIONED, ACTIVE_REVISION_LIMIT_REACHED, NO_DEPLOYMENT,\n HEALTH_CHECK_SKIPPED'\n x-dcl-conflicts:\n - reason\n - executionReason\n enum:\n - REVISION_REASON_UNDEFINED\n - PENDING\n - RESERVE\n - RETIRED\n - RETIRING\n - RECREATING\n - HEALTH_CHECK_CONTAINER_ERROR\n - CUSTOMIZED_PATH_RESPONSE_PENDING\n - MIN_INSTANCES_NOT_PROVISIONED\n - ACTIVE_REVISION_LIMIT_REACHED\n - NO_DEPLOYMENT\n - HEALTH_CHECK_SKIPPED\n severity:\n type: string\n x-dcl-go-name: Severity\n x-dcl-go-type: JobConditionsSeverityEnum\n description: 'How to interpret failures of this condition, one of\n Error, Warning, Info Possible values: SEVERITY_UNSPECIFIED, ERROR,\n WARNING, INFO'\n enum:\n - SEVERITY_UNSPECIFIED\n - ERROR\n - WARNING\n - INFO\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: JobConditionsStateEnum\n description: 'State of the condition. Possible values: STATE_UNSPECIFIED,\n CONDITION_PENDING, CONDITION_RECONCILING, CONDITION_FAILED, CONDITION_SUCCEEDED'\n enum:\n - STATE_UNSPECIFIED\n - CONDITION_PENDING\n - CONDITION_RECONCILING\n - CONDITION_FAILED\n - CONDITION_SUCCEEDED\n type:\n type: string\n x-dcl-go-name: Type\n description: 'type is used to communicate the status of the reconciliation\n process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting\n Types common to all resources include: * \"Ready\": True when the\n Resource is ready.'\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The creation time.\n x-kubernetes-immutable: true\n creator:\n type: string\n x-dcl-go-name: Creator\n readOnly: true\n description: Output only. Email address of the authenticated creator.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. The deletion time.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Output only. A system-generated fingerprint for this version\n of the resource. May be used to detect modification conflict during updates.\n x-kubernetes-immutable: true\n executionCount:\n type: integer\n format: int64\n x-dcl-go-name: ExecutionCount\n readOnly: true\n description: Output only. Number of executions created for this job.\n x-kubernetes-immutable: true\n expireTime:\n type: string\n format: date-time\n x-dcl-go-name: ExpireTime\n readOnly: true\n description: Output only. For a deleted resource, the time after which it\n will be permamently deleted.\n x-kubernetes-immutable: true\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n readOnly: true\n description: Output only. A number that monotonically increases every time\n the user modifies the desired state.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n readOnly: true\n description: KRM-style labels for the resource. User-provided labels are\n shared with Google's billing system, so they can be used to filter, or\n break down billing charges by team, component, environment, state, etc.\n For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels\n or https://cloud.google.com/run/docs/configuring/labels Cloud Run will\n populate some labels with 'run.googleapis.com' or 'serving.knative.dev'\n namespaces. 
Those labels are read-only, and user changes will not be preserved.\n x-kubernetes-immutable: true\n lastModifier:\n type: string\n x-dcl-go-name: LastModifier\n readOnly: true\n description: Output only. Email address of the last authenticated modifier.\n x-kubernetes-immutable: true\n latestCreatedExecution:\n type: object\n x-dcl-go-name: LatestCreatedExecution\n x-dcl-go-type: JobLatestCreatedExecution\n readOnly: true\n description: Output only. Name of the last created execution.\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n description: Creation timestamp of the execution.\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the execution.\n x-dcl-references:\n - resource: Run/Execution\n field: selfLink\n parent: true\n latestSucceededExecution:\n type: object\n x-dcl-go-name: LatestSucceededExecution\n x-dcl-go-type: JobLatestSucceededExecution\n readOnly: true\n description: Output only. Name of the last succeeded execution.\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n description: Creation timestamp of the execution.\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the execution.\n x-dcl-references:\n - resource: Run/Execution\n field: selfLink\n parent: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: JobLaunchStageEnum\n description: 'The launch stage as defined by [Google Cloud Platform Launch\n Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports\n `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. 
Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'The fully qualified name of this Job. Format: projects/{project}/locations/{location}/jobs/{job}'\n observedGeneration:\n type: integer\n format: int64\n x-dcl-go-name: ObservedGeneration\n readOnly: true\n description: Output only. The generation of this Job. See comments in `reconciling`\n for additional information on reconciliation process in Cloud Run.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: 'Output only. Returns true if the Job is currently being acted\n upon by the system to bring it into the desired state. When a new Job\n is created, or an existing one is updated, Cloud Run will asynchronously\n perform all necessary steps to bring the Job to the desired state. This\n process is called reconciliation. While reconciliation is in process,\n `observed_generation` and `latest_succeeded_execution`, will have transient\n values that might mismatch the intended state: Once reconciliation is\n over (and this field is false), there are two possible outcomes: reconciliation\n succeeded and the state matches the Job, or there was an error, and reconciliation\n failed. This state can be found in `terminal_condition.state`. 
If reconciliation\n succeeded, the following fields will match: `observed_generation` and\n `generation`, `latest_succeeded_execution` and `latest_created_execution`.\n If reconciliation failed, `observed_generation` and `latest_succeeded_execution`\n will have the state of the last succeeded execution or empty for newly\n created Job. Additional information on the failure can be found in `terminal_condition`\n and `conditions`.'\n x-kubernetes-immutable: true\n template:\n type: object\n x-dcl-go-name: Template\n x-dcl-go-type: JobTemplate\n description: Required. The template used to create executions for this Job.\n required:\n - template\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: KRM-style annotations for the resource.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: KRM-style labels for the resource.\n parallelism:\n type: integer\n format: int64\n x-dcl-go-name: Parallelism\n description: 'Specifies the maximum desired number of tasks the execution\n should run at any given time. Must be <= task_count. The actual number\n of tasks running in steady state will be less than this number when\n ((.spec.task_count - .status.successful) < .spec.parallelism), i.e.\n when the work left to do is less than max parallelism. More info:\n https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/'\n x-dcl-server-default: true\n taskCount:\n type: integer\n format: int64\n x-dcl-go-name: TaskCount\n description: 'Specifies the desired number of tasks the execution should\n run. Setting to 1 means that parallelism is limited to 1 and the success\n of that task signals the success of the execution. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/'\n x-dcl-server-default: true\n template:\n type: object\n x-dcl-go-name: Template\n x-dcl-go-type: JobTemplateTemplate\n description: Required. Describes the task(s) that will be created when\n executing an execution.\n properties:\n containers:\n type: array\n x-dcl-go-name: Containers\n description: Holds the single container that defines the unit of\n execution for this task.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainers\n required:\n - image\n properties:\n args:\n type: array\n x-dcl-go-name: Args\n description: 'Arguments to the entrypoint. The docker image''s\n CMD is used if this is not provided. Variable references\n $(VAR_NAME) are expanded using the container''s environment.\n If a variable cannot be resolved, the reference in the input\n string will be unchanged. The $(VAR_NAME) syntax can be\n escaped with a double $$, ie: $$(VAR_NAME). Escaped references\n will never be expanded, regardless of whether the variable\n exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n command:\n type: array\n x-dcl-go-name: Command\n description: 'Entrypoint array. Not executed within a shell.\n The docker image''s ENTRYPOINT is used if this is not provided.\n Variable references $(VAR_NAME) are expanded using the container''s\n environment. If a variable cannot be resolved, the reference\n in the input string will be unchanged. The $(VAR_NAME) syntax\n can be escaped with a double $$, ie: $$(VAR_NAME). Escaped\n references will never be expanded, regardless of whether\n the variable exists or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n env:\n type: array\n x-dcl-go-name: Env\n description: List of environment variables to set in the container.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainersEnv\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Name of the environment variable.\n Must be a C_IDENTIFIER, and mnay not exceed 32768\n characters.\n value:\n type: string\n x-dcl-go-name: Value\n description: 'Variable references $(VAR_NAME) are expanded\n using the previous defined environment variables in\n the container and any route environment variables.\n If a variable cannot be resolved, the reference in\n the input string will be unchanged. The $(VAR_NAME)\n syntax can be escaped with a double $$, ie: $$(VAR_NAME).\n Escaped references will never be expanded, regardless\n of whether the variable exists or not. Defaults to\n \"\", and the maximum length is 32768 bytes.'\n x-dcl-conflicts:\n - valueSource\n valueSource:\n type: object\n x-dcl-go-name: ValueSource\n x-dcl-go-type: JobTemplateTemplateContainersEnvValueSource\n description: Source for the environment variable's value.\n x-dcl-conflicts:\n - value\n properties:\n secretKeyRef:\n type: object\n x-dcl-go-name: SecretKeyRef\n x-dcl-go-type: JobTemplateTemplateContainersEnvValueSourceSecretKeyRef\n description: Selects a secret and a specific version\n from Cloud Secret Manager.\n required:\n - secret\n properties:\n secret:\n type: string\n x-dcl-go-name: Secret\n description: 'Required. The name of the secret\n in Cloud Secret Manager. Format: {secret_name}\n if the secret is in the same project. 
projects/{project}/secrets/{secret_name}\n if the secret is in a different project.'\n x-dcl-references:\n - resource: Secretmanager/Secret\n field: selfLink\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret\n version. Can be 'latest' for the latest value\n or an integer for a specific version.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Required. URL of the Container image in Google\n Container Registry or Docker More info: https://kubernetes.io/docs/concepts/containers/images'\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the container specified as a DNS_LABEL.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: List of ports to expose from the container. Only\n a single port can be specified. The specified ports must\n be listening on all interfaces (0.0.0.0) within the container\n to be accessible. If omitted, a port number will be chosen\n and passed to the container through the PORT environment\n variable for the container to listen on.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainersPorts\n properties:\n containerPort:\n type: integer\n format: int64\n x-dcl-go-name: ContainerPort\n description: Port number the container listens on. This\n must be a valid TCP port number, 0 < container_port\n < 65536.\n name:\n type: string\n x-dcl-go-name: Name\n description: If specified, used to specify which protocol\n to use. 
Allowed values are \"http1\" and \"h2c\".\n resources:\n type: object\n x-dcl-go-name: Resources\n x-dcl-go-type: JobTemplateTemplateContainersResources\n description: 'Compute Resource requirements by this container.\n More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'\n x-dcl-server-default: true\n properties:\n cpuIdle:\n type: boolean\n x-dcl-go-name: CpuIdle\n description: Determines whether CPU should be throttled\n or not outside of requests.\n x-dcl-server-default: true\n limits:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Limits\n description: 'Only memory and CPU are supported. Note:\n The only supported values for CPU are ''1'', ''2'',\n and ''4''. Setting 4 CPU requires at least 2Gi of memory.\n The values of the map is string form of the ''quantity''\n k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go'\n volumeMounts:\n type: array\n x-dcl-go-name: VolumeMounts\n description: Volume to mount into the container's filesystem.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainersVolumeMounts\n required:\n - name\n - mountPath\n properties:\n mountPath:\n type: string\n x-dcl-go-name: MountPath\n description: Required. Path within the container at\n which the volume should be mounted. Must not contain\n ':'. For Cloud SQL volumes, it can be left empty,\n or must otherwise be `/cloudsql`. All instances defined\n in the Volume will be available as `/cloudsql/[instance]`.\n For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. This must match the Name of a\n Volume.\n encryptionKey:\n type: string\n x-dcl-go-name: EncryptionKey\n description: A reference to a customer managed encryption key (CMEK)\n to use to encrypt this container image. 
For more information,\n go to https://cloud.google.com/run/docs/securing/using-cmek\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n executionEnvironment:\n type: string\n x-dcl-go-name: ExecutionEnvironment\n x-dcl-go-type: JobTemplateTemplateExecutionEnvironmentEnum\n description: 'The execution environment being used to host this\n Task. Possible values: EXECUTION_ENVIRONMENT_UNSPECIFIED, EXECUTION_ENVIRONMENT_DEFAULT,\n EXECUTION_ENVIRONMENT_GEN2'\n enum:\n - EXECUTION_ENVIRONMENT_UNSPECIFIED\n - EXECUTION_ENVIRONMENT_DEFAULT\n - EXECUTION_ENVIRONMENT_GEN2\n maxRetries:\n type: integer\n format: int64\n x-dcl-go-name: MaxRetries\n description: Number of retries allowed per Task, before marking\n this Task failed.\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Email address of the IAM service account associated\n with the Task of a Job. The service account represents the identity\n of the running task, and determines what permissions the task\n has. If not provided, the task will use the project's default\n service account.\n x-dcl-server-default: true\n timeout:\n type: string\n x-dcl-go-name: Timeout\n description: Max allowed time duration the Task may be active before\n the system will actively try to mark it failed and kill associated\n containers. This applies per attempt of a task, meaning each retry\n can run for the full timeout.\n x-dcl-server-default: true\n volumes:\n type: array\n x-dcl-go-name: Volumes\n description: A list of Volumes to make available to containers.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateVolumes\n required:\n - name\n properties:\n cloudSqlInstance:\n type: object\n x-dcl-go-name: CloudSqlInstance\n x-dcl-go-type: JobTemplateTemplateVolumesCloudSqlInstance\n description: For Cloud SQL volumes, contains the specific\n instances that should be mounted. 
Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and Cloud\n Run.\n x-dcl-conflicts:\n - secret\n properties:\n instances:\n type: array\n x-dcl-go-name: Instances\n description: 'The Cloud SQL instance connection names,\n as can be found in https://console.cloud.google.com/sql/instances.\n Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and\n Cloud Run. Format: {project}:{location}:{instance}'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Volume's name.\n secret:\n type: object\n x-dcl-go-name: Secret\n x-dcl-go-type: JobTemplateTemplateVolumesSecret\n description: 'Secret represents a secret that should populate\n this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'\n x-dcl-conflicts:\n - cloudSqlInstance\n required:\n - secret\n properties:\n defaultMode:\n type: integer\n format: int64\n x-dcl-go-name: DefaultMode\n description: 'Integer representation of mode bits to use\n on created files by default. Must be a value between\n 0000 and 0777 (octal), defaulting to 0644. Directories\n within the path are not affected by this setting. Notes\n * Internally, a umask of 0222 will be applied to any\n non-zero value. * This is an integer representation\n of the mode bits. So, the octal integer value should\n look exactly as the chmod numeric notation with a leading\n zero. Some examples: for chmod 777 (a=rwx), set to 0777\n (octal) or 511 (base-10). For chmod 640 (u=rw,g=r),\n set to 0640 (octal) or 416 (base-10). For chmod 755\n (u=rwx,g=rx,o=rx), set to 0755 (octal) or 493 (base-10).\n * This might be in conflict with other options that\n affect the file mode, like fsGroup, and the result can\n be other mode bits set. 
This might be in conflict with\n other options that affect the file mode, like fsGroup,\n and as a result, other mode bits could be set.'\n items:\n type: array\n x-dcl-go-name: Items\n description: If unspecified, the volume will expose a\n file whose name is the secret, relative to VolumeMount.mount_path.\n If specified, the key will be used as the version to\n fetch from Cloud Secret Manager and the path will be\n the name of the file exposed in the volume. When items\n are defined, they must specify a path and a version.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateVolumesSecretItems\n required:\n - path\n properties:\n mode:\n type: integer\n format: int64\n x-dcl-go-name: Mode\n description: 'Integer octal mode bits to use on\n this file, must be a value between 01 and 0777\n (octal). If 0 or not set, the Volume''s default\n mode will be used. Notes * Internally, a umask\n of 0222 will be applied to any non-zero value.\n * This is an integer representation of the mode\n bits. So, the octal integer value should look\n exactly as the chmod numeric notation with a leading\n zero. Some examples: for chmod 777 (a=rwx), set\n to 0777 (octal) or 511 (base-10). For chmod 640\n (u=rw,g=r), set to 0640 (octal) or 416 (base-10).\n For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal)\n or 493 (base-10). * This might be in conflict\n with other options that affect the file mode,\n like fsGroup, and the result can be other mode\n bits set.'\n path:\n type: string\n x-dcl-go-name: Path\n description: Required. The relative path of the\n secret in the container.\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret version.\n Can be 'latest' for the latest value or an integer\n for a specific version.\n secret:\n type: string\n x-dcl-go-name: Secret\n description: 'Required. The name of the secret in Cloud\n Secret Manager. 
Format: {secret} if the secret is in\n the same project. projects/{project}/secrets/{secret}\n if the secret is in a different project.'\n vpcAccess:\n type: object\n x-dcl-go-name: VPCAccess\n x-dcl-go-type: JobTemplateTemplateVPCAccess\n description: VPC Access configuration to use for this Task. For\n more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.\n properties:\n connector:\n type: string\n x-dcl-go-name: Connector\n description: 'VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}'\n x-dcl-references:\n - resource: Vpcaccess/Connector\n field: selfLink\n egress:\n type: string\n x-dcl-go-name: Egress\n x-dcl-go-type: JobTemplateTemplateVPCAccessEgressEnum\n description: 'Traffic VPC egress settings. Possible values:\n VPC_EGRESS_UNSPECIFIED, ALL_TRAFFIC, PRIVATE_RANGES_ONLY'\n enum:\n - VPC_EGRESS_UNSPECIFIED\n - ALL_TRAFFIC\n - PRIVATE_RANGES_ONLY\n terminalCondition:\n type: object\n x-dcl-go-name: TerminalCondition\n x-dcl-go-type: JobTerminalCondition\n readOnly: true\n description: Output only. The Condition of this Job, containing its readiness\n status, and detailed error information in case it did not reach the desired\n state.\n properties:\n domainMappingReason:\n type: string\n x-dcl-go-name: DomainMappingReason\n x-dcl-go-type: JobTerminalConditionDomainMappingReasonEnum\n description: 'A reason for the domain mapping condition. 
Possible values:\n DOMAIN_MAPPING_REASON_UNDEFINED, ROUTE_NOT_READY, PERMISSION_DENIED,\n CERTIFICATE_ALREADY_EXISTS, MAPPING_ALREADY_EXISTS, CERTIFICATE_PENDING,\n CERTIFICATE_FAILED'\n x-dcl-conflicts:\n - reason\n - internalReason\n - revisionReason\n - executionReason\n enum:\n - DOMAIN_MAPPING_REASON_UNDEFINED\n - ROUTE_NOT_READY\n - PERMISSION_DENIED\n - CERTIFICATE_ALREADY_EXISTS\n - MAPPING_ALREADY_EXISTS\n - CERTIFICATE_PENDING\n - CERTIFICATE_FAILED\n executionReason:\n type: string\n x-dcl-go-name: ExecutionReason\n x-dcl-go-type: JobTerminalConditionExecutionReasonEnum\n description: 'A reason for the execution condition. Possible values:\n EXECUTION_REASON_UNDEFINED, JOB_STATUS_SERVICE_POLLING_ERROR, NON_ZERO_EXIT_CODE'\n x-dcl-conflicts:\n - reason\n - internalReason\n - domainMappingReason\n - revisionReason\n enum:\n - EXECUTION_REASON_UNDEFINED\n - JOB_STATUS_SERVICE_POLLING_ERROR\n - NON_ZERO_EXIT_CODE\n internalReason:\n type: string\n x-dcl-go-name: InternalReason\n x-dcl-go-type: JobTerminalConditionInternalReasonEnum\n description: 'A reason for the internal condition. 
Possible values:\n INTERNAL_REASON_UNDEFINED, CONFLICTING_REVISION_NAME, REVISION_MISSING,\n CONFIGURATION_MISSING, ASSIGNING_TRAFFIC, UPDATING_INGRESS_TRAFFIC_ALLOWED,\n REVISION_ORG_POLICY_VIOLATION, ENABLING_GCFV2_URI_SUPPORT'\n x-dcl-conflicts:\n - reason\n - domainMappingReason\n - revisionReason\n - executionReason\n enum:\n - INTERNAL_REASON_UNDEFINED\n - CONFLICTING_REVISION_NAME\n - REVISION_MISSING\n - CONFIGURATION_MISSING\n - ASSIGNING_TRAFFIC\n - UPDATING_INGRESS_TRAFFIC_ALLOWED\n - REVISION_ORG_POLICY_VIOLATION\n - ENABLING_GCFV2_URI_SUPPORT\n lastTransitionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastTransitionTime\n description: Last time the condition transitioned from one status to\n another.\n message:\n type: string\n x-dcl-go-name: Message\n description: Human readable message indicating details about the current\n status.\n reason:\n type: string\n x-dcl-go-name: Reason\n x-dcl-go-type: JobTerminalConditionReasonEnum\n description: 'A common (service-level) reason for this condition. 
Possible\n values: COMMON_REASON_UNDEFINED, UNKNOWN, ROUTE_MISSING, REVISION_FAILED,\n PROGRESS_DEADLINE_EXCEEDED, CONTAINER_MISSING, CONTAINER_PERMISSION_DENIED,\n CONTAINER_IMAGE_UNAUTHORIZED, CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED,\n ENCRYPTION_KEY_PERMISSION_DENIED, ENCRYPTION_KEY_CHECK_FAILED, SECRETS_ACCESS_CHECK_FAILED,\n WAITING_FOR_OPERATION, IMMEDIATE_RETRY, POSTPONED_RETRY'\n x-dcl-conflicts:\n - internalReason\n - domainMappingReason\n - revisionReason\n - executionReason\n enum:\n - COMMON_REASON_UNDEFINED\n - UNKNOWN\n - ROUTE_MISSING\n - REVISION_FAILED\n - PROGRESS_DEADLINE_EXCEEDED\n - CONTAINER_MISSING\n - CONTAINER_PERMISSION_DENIED\n - CONTAINER_IMAGE_UNAUTHORIZED\n - CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED\n - ENCRYPTION_KEY_PERMISSION_DENIED\n - ENCRYPTION_KEY_CHECK_FAILED\n - SECRETS_ACCESS_CHECK_FAILED\n - WAITING_FOR_OPERATION\n - IMMEDIATE_RETRY\n - POSTPONED_RETRY\n revisionReason:\n type: string\n x-dcl-go-name: RevisionReason\n x-dcl-go-type: JobTerminalConditionRevisionReasonEnum\n description: 'A reason for the revision condition. 
Possible values:\n REVISION_REASON_UNDEFINED, PENDING, RESERVE, RETIRED, RETIRING, RECREATING,\n HEALTH_CHECK_CONTAINER_ERROR, CUSTOMIZED_PATH_RESPONSE_PENDING, MIN_INSTANCES_NOT_PROVISIONED,\n ACTIVE_REVISION_LIMIT_REACHED, NO_DEPLOYMENT, HEALTH_CHECK_SKIPPED'\n x-dcl-conflicts:\n - reason\n - internalReason\n - domainMappingReason\n - executionReason\n enum:\n - REVISION_REASON_UNDEFINED\n - PENDING\n - RESERVE\n - RETIRED\n - RETIRING\n - RECREATING\n - HEALTH_CHECK_CONTAINER_ERROR\n - CUSTOMIZED_PATH_RESPONSE_PENDING\n - MIN_INSTANCES_NOT_PROVISIONED\n - ACTIVE_REVISION_LIMIT_REACHED\n - NO_DEPLOYMENT\n - HEALTH_CHECK_SKIPPED\n severity:\n type: string\n x-dcl-go-name: Severity\n x-dcl-go-type: JobTerminalConditionSeverityEnum\n description: 'How to interpret failures of this condition, one of Error,\n Warning, Info Possible values: SEVERITY_UNSPECIFIED, ERROR, WARNING,\n INFO'\n enum:\n - SEVERITY_UNSPECIFIED\n - ERROR\n - WARNING\n - INFO\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: JobTerminalConditionStateEnum\n description: 'State of the condition. Possible values: STATE_UNSPECIFIED,\n CONDITION_PENDING, CONDITION_RECONCILING, CONDITION_FAILED, CONDITION_SUCCEEDED'\n enum:\n - STATE_UNSPECIFIED\n - CONDITION_PENDING\n - CONDITION_RECONCILING\n - CONDITION_FAILED\n - CONDITION_SUCCEEDED\n type:\n type: string\n x-dcl-go-name: Type\n description: 'type is used to communicate the status of the reconciliation\n process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting\n Types common to all resources include: * \"Ready\": True when the Resource\n is ready.'\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. 
Server assigned unique identifier for the Execution.\n The value is a UUID4 string and guaranteed to remain unchanged until the\n resource is deleted.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last-modified time.\n x-kubernetes-immutable: true\n") +var YAML_job = []byte("info:\n title: Run/Job\n description: The Run Job resource\n x-dcl-struct-name: Job\n x-dcl-has-iam: true\npaths:\n get:\n description: The function used to get information about a Job\n parameters:\n - name: Job\n required: true\n description: A full instance of a Job\n apply:\n description: The function used to apply information about a Job\n parameters:\n - name: Job\n required: true\n description: A full instance of a Job\n delete:\n description: The function used to delete a Job\n parameters:\n - name: Job\n required: true\n description: A full instance of a Job\n deleteAll:\n description: The function used to delete all Job\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Job\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Job:\n title: Job\n x-dcl-id: projects/{{project}}/locations/{{location}}/jobs/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: true\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - template\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'KRM-style annotations for the resource. 
Unstructured key value\n map that may be set by external tools to store and arbitrary metadata.\n They are not queryable and should be preserved when modifying objects.\n Cloud Run will populate some annotations using ''run.googleapis.com''\n or ''serving.knative.dev'' namespaces. This field follows Kubernetes annotations''\n namespacing, limits, and rules. More info: https://kubernetes.io/docs/user-guide/annotations'\n binaryAuthorization:\n type: object\n x-dcl-go-name: BinaryAuthorization\n x-dcl-go-type: JobBinaryAuthorization\n description: Settings for the Binary Authorization feature.\n properties:\n breakglassJustification:\n type: string\n x-dcl-go-name: BreakglassJustification\n description: If present, indicates to use Breakglass using this justification.\n If use_default is False, then it must be empty. For more information\n on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass\n useDefault:\n type: boolean\n x-dcl-go-name: UseDefault\n description: If True, indicates to use the default project's binary\n authorization policy. If False, binary authorization will be disabled.\n client:\n type: string\n x-dcl-go-name: Client\n description: Arbitrary identifier for the API client.\n clientVersion:\n type: string\n x-dcl-go-name: ClientVersion\n description: Arbitrary version identifier for the API client.\n conditions:\n type: array\n x-dcl-go-name: Conditions\n readOnly: true\n description: Output only. The Conditions of all other associated sub-resources.\n They contain additional diagnostics information in case the Job does not\n reach its desired state. 
See comments in `reconciling` for additional\n information on reconciliation process in Cloud Run.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobConditions\n properties:\n executionReason:\n type: string\n x-dcl-go-name: ExecutionReason\n x-dcl-go-type: JobConditionsExecutionReasonEnum\n description: 'A reason for the execution condition. Possible values:\n EXECUTION_REASON_UNDEFINED, JOB_STATUS_SERVICE_POLLING_ERROR, NON_ZERO_EXIT_CODE'\n x-dcl-conflicts:\n - reason\n - revisionReason\n enum:\n - EXECUTION_REASON_UNDEFINED\n - JOB_STATUS_SERVICE_POLLING_ERROR\n - NON_ZERO_EXIT_CODE\n lastTransitionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastTransitionTime\n description: Last time the condition transitioned from one status\n to another.\n message:\n type: string\n x-dcl-go-name: Message\n description: Human readable message indicating details about the current\n status.\n reason:\n type: string\n x-dcl-go-name: Reason\n x-dcl-go-type: JobConditionsReasonEnum\n description: 'A common (service-level) reason for this condition.\n Possible values: COMMON_REASON_UNDEFINED, UNKNOWN, REVISION_FAILED,\n PROGRESS_DEADLINE_EXCEEDED, BUILD_STEP_FAILED, CONTAINER_MISSING,\n CONTAINER_PERMISSION_DENIED, CONTAINER_IMAGE_UNAUTHORIZED, CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED,\n ENCRYPTION_KEY_PERMISSION_DENIED, ENCRYPTION_KEY_CHECK_FAILED, SECRETS_ACCESS_CHECK_FAILED,\n WAITING_FOR_OPERATION, IMMEDIATE_RETRY, POSTPONED_RETRY'\n x-dcl-conflicts:\n - revisionReason\n - executionReason\n enum:\n - COMMON_REASON_UNDEFINED\n - UNKNOWN\n - REVISION_FAILED\n - PROGRESS_DEADLINE_EXCEEDED\n - BUILD_STEP_FAILED\n - CONTAINER_MISSING\n - CONTAINER_PERMISSION_DENIED\n - CONTAINER_IMAGE_UNAUTHORIZED\n - CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED\n - ENCRYPTION_KEY_PERMISSION_DENIED\n - ENCRYPTION_KEY_CHECK_FAILED\n - SECRETS_ACCESS_CHECK_FAILED\n - WAITING_FOR_OPERATION\n - IMMEDIATE_RETRY\n - POSTPONED_RETRY\n 
revisionReason:\n type: string\n x-dcl-go-name: RevisionReason\n x-dcl-go-type: JobConditionsRevisionReasonEnum\n description: 'A reason for the revision condition. Possible values:\n REVISION_REASON_UNDEFINED, PENDING, RESERVE, RETIRED, RETIRING,\n RECREATING, HEALTH_CHECK_CONTAINER_ERROR, CUSTOMIZED_PATH_RESPONSE_PENDING,\n MIN_INSTANCES_NOT_PROVISIONED, ACTIVE_REVISION_LIMIT_REACHED, NO_DEPLOYMENT,\n HEALTH_CHECK_SKIPPED'\n x-dcl-conflicts:\n - reason\n - executionReason\n enum:\n - REVISION_REASON_UNDEFINED\n - PENDING\n - RESERVE\n - RETIRED\n - RETIRING\n - RECREATING\n - HEALTH_CHECK_CONTAINER_ERROR\n - CUSTOMIZED_PATH_RESPONSE_PENDING\n - MIN_INSTANCES_NOT_PROVISIONED\n - ACTIVE_REVISION_LIMIT_REACHED\n - NO_DEPLOYMENT\n - HEALTH_CHECK_SKIPPED\n severity:\n type: string\n x-dcl-go-name: Severity\n x-dcl-go-type: JobConditionsSeverityEnum\n description: 'How to interpret failures of this condition, one of\n Error, Warning, Info Possible values: SEVERITY_UNSPECIFIED, ERROR,\n WARNING, INFO'\n enum:\n - SEVERITY_UNSPECIFIED\n - ERROR\n - WARNING\n - INFO\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: JobConditionsStateEnum\n description: 'State of the condition. Possible values: STATE_UNSPECIFIED,\n CONDITION_PENDING, CONDITION_RECONCILING, CONDITION_FAILED, CONDITION_SUCCEEDED'\n enum:\n - STATE_UNSPECIFIED\n - CONDITION_PENDING\n - CONDITION_RECONCILING\n - CONDITION_FAILED\n - CONDITION_SUCCEEDED\n type:\n type: string\n x-dcl-go-name: Type\n description: 'type is used to communicate the status of the reconciliation\n process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting\n Types common to all resources include: * \"Ready\": True when the\n Resource is ready.'\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The creation time.\n x-kubernetes-immutable: true\n creator:\n type: string\n x-dcl-go-name: Creator\n readOnly: true\n description: Output only. Email address of the authenticated creator.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. The deletion time.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Output only. A system-generated fingerprint for this version\n of the resource. May be used to detect modification conflict during updates.\n x-kubernetes-immutable: true\n executionCount:\n type: integer\n format: int64\n x-dcl-go-name: ExecutionCount\n readOnly: true\n description: Output only. Number of executions created for this job.\n x-kubernetes-immutable: true\n expireTime:\n type: string\n format: date-time\n x-dcl-go-name: ExpireTime\n readOnly: true\n description: Output only. For a deleted resource, the time after which it\n will be permamently deleted.\n x-kubernetes-immutable: true\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n readOnly: true\n description: Output only. A number that monotonically increases every time\n the user modifies the desired state.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n readOnly: true\n description: KRM-style labels for the resource. User-provided labels are\n shared with Google's billing system, so they can be used to filter, or\n break down billing charges by team, component, environment, state, etc.\n For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels\n or https://cloud.google.com/run/docs/configuring/labels Cloud Run will\n populate some labels with 'run.googleapis.com' or 'serving.knative.dev'\n namespaces. 
Those labels are read-only, and user changes will not be preserved.\n x-kubernetes-immutable: true\n lastModifier:\n type: string\n x-dcl-go-name: LastModifier\n readOnly: true\n description: Output only. Email address of the last authenticated modifier.\n x-kubernetes-immutable: true\n latestCreatedExecution:\n type: object\n x-dcl-go-name: LatestCreatedExecution\n x-dcl-go-type: JobLatestCreatedExecution\n readOnly: true\n description: Output only. Name of the last created execution.\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n description: Creation timestamp of the execution.\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the execution.\n x-dcl-references:\n - resource: Run/Execution\n field: selfLink\n parent: true\n latestSucceededExecution:\n type: object\n x-dcl-go-name: LatestSucceededExecution\n x-dcl-go-type: JobLatestSucceededExecution\n readOnly: true\n description: Output only. Name of the last succeeded execution.\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n description: Creation timestamp of the execution.\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the execution.\n x-dcl-references:\n - resource: Run/Execution\n field: selfLink\n parent: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: JobLaunchStageEnum\n description: 'The launch stage as defined by [Google Cloud Platform Launch\n Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports\n `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. 
Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'The fully qualified name of this Job. Format: projects/{project}/locations/{location}/jobs/{job}'\n observedGeneration:\n type: integer\n format: int64\n x-dcl-go-name: ObservedGeneration\n readOnly: true\n description: Output only. The generation of this Job. See comments in `reconciling`\n for additional information on reconciliation process in Cloud Run.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: 'Output only. Returns true if the Job is currently being acted\n upon by the system to bring it into the desired state. When a new Job\n is created, or an existing one is updated, Cloud Run will asynchronously\n perform all necessary steps to bring the Job to the desired state. This\n process is called reconciliation. While reconciliation is in process,\n `observed_generation` and `latest_succeeded_execution`, will have transient\n values that might mismatch the intended state: Once reconciliation is\n over (and this field is false), there are two possible outcomes: reconciliation\n succeeded and the state matches the Job, or there was an error, and reconciliation\n failed. This state can be found in `terminal_condition.state`. 
If reconciliation\n succeeded, the following fields will match: `observed_generation` and\n `generation`, `latest_succeeded_execution` and `latest_created_execution`.\n If reconciliation failed, `observed_generation` and `latest_succeeded_execution`\n will have the state of the last succeeded execution or empty for newly\n created Job. Additional information on the failure can be found in `terminal_condition`\n and `conditions`.'\n x-kubernetes-immutable: true\n template:\n type: object\n x-dcl-go-name: Template\n x-dcl-go-type: JobTemplate\n description: Required. The template used to create executions for this Job.\n required:\n - template\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: KRM-style annotations for the resource.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: KRM-style labels for the resource.\n parallelism:\n type: integer\n format: int64\n x-dcl-go-name: Parallelism\n description: 'Specifies the maximum desired number of tasks the execution\n should run at any given time. Must be <= task_count. The actual number\n of tasks running in steady state will be less than this number when\n ((.spec.task_count - .status.successful) < .spec.parallelism), i.e.\n when the work left to do is less than max parallelism. More info:\n https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/'\n x-dcl-server-default: true\n taskCount:\n type: integer\n format: int64\n x-dcl-go-name: TaskCount\n description: 'Specifies the desired number of tasks the execution should\n run. Setting to 1 means that parallelism is limited to 1 and the success\n of that task signals the success of the execution. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/'\n x-dcl-server-default: true\n template:\n type: object\n x-dcl-go-name: Template\n x-dcl-go-type: JobTemplateTemplate\n description: Required. Describes the task(s) that will be created when\n executing an execution.\n properties:\n containers:\n type: array\n x-dcl-go-name: Containers\n description: Holds the single container that defines the unit of\n execution for this task.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainers\n required:\n - image\n properties:\n args:\n type: array\n x-dcl-go-name: Args\n description: 'Arguments to the entrypoint. The docker image''s\n CMD is used if this is not provided. Variable references\n $(VAR_NAME) are expanded using the container''s environment.\n If a variable cannot be resolved, the reference in the input\n string will be unchanged. The $(VAR_NAME) syntax can be\n escaped with a double $$, ie: $$(VAR_NAME). Escaped references\n will never be expanded, regardless of whether the variable\n exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n command:\n type: array\n x-dcl-go-name: Command\n description: 'Entrypoint array. Not executed within a shell.\n The docker image''s ENTRYPOINT is used if this is not provided.\n Variable references $(VAR_NAME) are expanded using the container''s\n environment. If a variable cannot be resolved, the reference\n in the input string will be unchanged. The $(VAR_NAME) syntax\n can be escaped with a double $$, ie: $$(VAR_NAME). Escaped\n references will never be expanded, regardless of whether\n the variable exists or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n env:\n type: array\n x-dcl-go-name: Env\n description: List of environment variables to set in the container.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainersEnv\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Name of the environment variable.\n Must be a C_IDENTIFIER, and mnay not exceed 32768\n characters.\n value:\n type: string\n x-dcl-go-name: Value\n description: 'Variable references $(VAR_NAME) are expanded\n using the previous defined environment variables in\n the container and any route environment variables.\n If a variable cannot be resolved, the reference in\n the input string will be unchanged. The $(VAR_NAME)\n syntax can be escaped with a double $$, ie: $$(VAR_NAME).\n Escaped references will never be expanded, regardless\n of whether the variable exists or not. Defaults to\n \"\", and the maximum length is 32768 bytes.'\n x-dcl-conflicts:\n - valueSource\n valueSource:\n type: object\n x-dcl-go-name: ValueSource\n x-dcl-go-type: JobTemplateTemplateContainersEnvValueSource\n description: Source for the environment variable's value.\n x-dcl-conflicts:\n - value\n properties:\n secretKeyRef:\n type: object\n x-dcl-go-name: SecretKeyRef\n x-dcl-go-type: JobTemplateTemplateContainersEnvValueSourceSecretKeyRef\n description: Selects a secret and a specific version\n from Cloud Secret Manager.\n required:\n - secret\n properties:\n secret:\n type: string\n x-dcl-go-name: Secret\n description: Required. The name of the secret\n in Cloud Secret Manager. Format {secret_name}\n if the secret is in the same project. 
projects/{project}/secrets/{secret_name}\n if the secret is in a different project.\n x-dcl-references:\n - resource: Secretmanager/Secret\n field: selfLink\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret\n version. Can be 'latest' for the latest value\n or an integer for a specific version.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Required. URL of the Container image in Google\n Container Registry or Docker More info: https://kubernetes.io/docs/concepts/containers/images'\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the container specified as a DNS_LABEL.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: List of ports to expose from the container. Only\n a single port can be specified. The specified ports must\n be listening on all interfaces (0.0.0.0) within the container\n to be accessible. If omitted, a port number will be chosen\n and passed to the container through the PORT environment\n variable for the container to listen on.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainersPorts\n properties:\n containerPort:\n type: integer\n format: int64\n x-dcl-go-name: ContainerPort\n description: Port number the container listens on. This\n must be a valid TCP port number, 0 < container_port\n < 65536.\n name:\n type: string\n x-dcl-go-name: Name\n description: If specified, used to specify which protocol\n to use. 
Allowed values are \"http1\" and \"h2c\".\n resources:\n type: object\n x-dcl-go-name: Resources\n x-dcl-go-type: JobTemplateTemplateContainersResources\n description: 'Compute Resource requirements by this container.\n More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'\n x-dcl-server-default: true\n properties:\n cpuIdle:\n type: boolean\n x-dcl-go-name: CpuIdle\n description: Determines whether CPU should be throttled\n or not outside of requests.\n x-dcl-server-default: true\n limits:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Limits\n description: 'Only memory and CPU are supported. Note:\n The only supported values for CPU are ''1'', ''2'',\n and ''4''. Setting 4 CPU requires at least 2Gi of memory.\n The values of the map is string form of the ''quantity''\n k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go'\n volumeMounts:\n type: array\n x-dcl-go-name: VolumeMounts\n description: Volume to mount into the container's filesystem.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateContainersVolumeMounts\n required:\n - name\n - mountPath\n properties:\n mountPath:\n type: string\n x-dcl-go-name: MountPath\n description: Required. Path within the container at\n which the volume should be mounted. Must not contain\n ':'. For Cloud SQL volumes, it can be left empty,\n or must otherwise be `/cloudsql`. All instances defined\n in the Volume will be available as `/cloudsql/[instance]`.\n For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. This must match the Name of a\n Volume.\n encryptionKey:\n type: string\n x-dcl-go-name: EncryptionKey\n description: A reference to a customer managed encryption key (CMEK)\n to use to encrypt this container image. 
For more information,\n go to https://cloud.google.com/run/docs/securing/using-cmek\n x-dcl-references:\n - resource: Cloudkms/CryptoKey\n field: selfLink\n executionEnvironment:\n type: string\n x-dcl-go-name: ExecutionEnvironment\n x-dcl-go-type: JobTemplateTemplateExecutionEnvironmentEnum\n description: 'The execution environment being used to host this\n Task. Possible values: EXECUTION_ENVIRONMENT_UNSPECIFIED, EXECUTION_ENVIRONMENT_DEFAULT,\n EXECUTION_ENVIRONMENT_GEN2'\n enum:\n - EXECUTION_ENVIRONMENT_UNSPECIFIED\n - EXECUTION_ENVIRONMENT_DEFAULT\n - EXECUTION_ENVIRONMENT_GEN2\n maxRetries:\n type: integer\n format: int64\n x-dcl-go-name: MaxRetries\n description: Number of retries allowed per Task, before marking\n this Task failed.\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Email address of the IAM service account associated\n with the Task of a Job. The service account represents the identity\n of the running task, and determines what permissions the task\n has. If not provided, the task will use the project's default\n service account.\n x-dcl-server-default: true\n timeout:\n type: string\n x-dcl-go-name: Timeout\n description: Max allowed time duration the Task may be active before\n the system will actively try to mark it failed and kill associated\n containers. This applies per attempt of a task, meaning each retry\n can run for the full timeout.\n x-dcl-server-default: true\n volumes:\n type: array\n x-dcl-go-name: Volumes\n description: A list of Volumes to make available to containers.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateVolumes\n required:\n - name\n properties:\n cloudSqlInstance:\n type: object\n x-dcl-go-name: CloudSqlInstance\n x-dcl-go-type: JobTemplateTemplateVolumesCloudSqlInstance\n description: For Cloud SQL volumes, contains the specific\n instances that should be mounted. 
Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and Cloud\n Run.\n x-dcl-conflicts:\n - secret\n properties:\n instances:\n type: array\n x-dcl-go-name: Instances\n description: The Cloud SQL instance connection names,\n as can be found in https://console.cloud.google.com/sql/instances.\n Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and\n Cloud Run. Format {project}:{location}:{instance}\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Volume's name.\n secret:\n type: object\n x-dcl-go-name: Secret\n x-dcl-go-type: JobTemplateTemplateVolumesSecret\n description: 'Secret represents a secret that should populate\n this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'\n x-dcl-conflicts:\n - cloudSqlInstance\n required:\n - secret\n properties:\n defaultMode:\n type: integer\n format: int64\n x-dcl-go-name: DefaultMode\n description: 'Integer representation of mode bits to use\n on created files by default. Must be a value between\n 0000 and 0777 (octal), defaulting to 0644. Directories\n within the path are not affected by this setting. Notes\n * Internally, a umask of 0222 will be applied to any\n non-zero value. * This is an integer representation\n of the mode bits. So, the octal integer value should\n look exactly as the chmod numeric notation with a leading\n zero. Some examples: for chmod 777 (a=rwx), set to 0777\n (octal) or 511 (base-10). For chmod 640 (u=rw,g=r),\n set to 0640 (octal) or 416 (base-10). For chmod 755\n (u=rwx,g=rx,o=rx), set to 0755 (octal) or 493 (base-10).\n * This might be in conflict with other options that\n affect the file mode, like fsGroup, and the result can\n be other mode bits set. 
This might be in conflict with\n other options that affect the file mode, like fsGroup,\n and as a result, other mode bits could be set.'\n items:\n type: array\n x-dcl-go-name: Items\n description: If unspecified, the volume will expose a\n file whose name is the secret, relative to VolumeMount.mount_path.\n If specified, the key will be used as the version to\n fetch from Cloud Secret Manager and the path will be\n the name of the file exposed in the volume. When items\n are defined, they must specify a path and a version.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: JobTemplateTemplateVolumesSecretItems\n required:\n - path\n properties:\n mode:\n type: integer\n format: int64\n x-dcl-go-name: Mode\n description: 'Integer octal mode bits to use on\n this file, must be a value between 01 and 0777\n (octal). If 0 or not set, the Volume''s default\n mode will be used. Notes * Internally, a umask\n of 0222 will be applied to any non-zero value.\n * This is an integer representation of the mode\n bits. So, the octal integer value should look\n exactly as the chmod numeric notation with a leading\n zero. Some examples: for chmod 777 (a=rwx), set\n to 0777 (octal) or 511 (base-10). For chmod 640\n (u=rw,g=r), set to 0640 (octal) or 416 (base-10).\n For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal)\n or 493 (base-10). * This might be in conflict\n with other options that affect the file mode,\n like fsGroup, and the result can be other mode\n bits set.'\n path:\n type: string\n x-dcl-go-name: Path\n description: Required. The relative path of the\n secret in the container.\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret version.\n Can be 'latest' for the latest value or an integer\n for a specific version.\n secret:\n type: string\n x-dcl-go-name: Secret\n description: Required. The name of the secret in Cloud\n Secret Manager. 
Format {secret} if the secret is in\n the same project. projects/{project}/secrets/{secret}\n if the secret is in a different project.\n vpcAccess:\n type: object\n x-dcl-go-name: VPCAccess\n x-dcl-go-type: JobTemplateTemplateVPCAccess\n description: VPC Access configuration to use for this Task. For\n more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.\n properties:\n connector:\n type: string\n x-dcl-go-name: Connector\n description: 'VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}'\n x-dcl-references:\n - resource: Vpcaccess/Connector\n field: selfLink\n egress:\n type: string\n x-dcl-go-name: Egress\n x-dcl-go-type: JobTemplateTemplateVPCAccessEgressEnum\n description: 'Traffic VPC egress settings. Possible values:\n VPC_EGRESS_UNSPECIFIED, ALL_TRAFFIC, PRIVATE_RANGES_ONLY'\n enum:\n - VPC_EGRESS_UNSPECIFIED\n - ALL_TRAFFIC\n - PRIVATE_RANGES_ONLY\n terminalCondition:\n type: object\n x-dcl-go-name: TerminalCondition\n x-dcl-go-type: JobTerminalCondition\n readOnly: true\n description: Output only. The Condition of this Job, containing its readiness\n status, and detailed error information in case it did not reach the desired\n state.\n properties:\n domainMappingReason:\n type: string\n x-dcl-go-name: DomainMappingReason\n x-dcl-go-type: JobTerminalConditionDomainMappingReasonEnum\n description: 'A reason for the domain mapping condition. 
Possible values:\n DOMAIN_MAPPING_REASON_UNDEFINED, ROUTE_NOT_READY, PERMISSION_DENIED,\n CERTIFICATE_ALREADY_EXISTS, MAPPING_ALREADY_EXISTS, CERTIFICATE_PENDING,\n CERTIFICATE_FAILED'\n x-dcl-conflicts:\n - reason\n - internalReason\n - revisionReason\n - executionReason\n enum:\n - DOMAIN_MAPPING_REASON_UNDEFINED\n - ROUTE_NOT_READY\n - PERMISSION_DENIED\n - CERTIFICATE_ALREADY_EXISTS\n - MAPPING_ALREADY_EXISTS\n - CERTIFICATE_PENDING\n - CERTIFICATE_FAILED\n executionReason:\n type: string\n x-dcl-go-name: ExecutionReason\n x-dcl-go-type: JobTerminalConditionExecutionReasonEnum\n description: 'A reason for the execution condition. Possible values:\n EXECUTION_REASON_UNDEFINED, JOB_STATUS_SERVICE_POLLING_ERROR, NON_ZERO_EXIT_CODE'\n x-dcl-conflicts:\n - reason\n - internalReason\n - domainMappingReason\n - revisionReason\n enum:\n - EXECUTION_REASON_UNDEFINED\n - JOB_STATUS_SERVICE_POLLING_ERROR\n - NON_ZERO_EXIT_CODE\n internalReason:\n type: string\n x-dcl-go-name: InternalReason\n x-dcl-go-type: JobTerminalConditionInternalReasonEnum\n description: 'A reason for the internal condition. 
Possible values:\n INTERNAL_REASON_UNDEFINED, CONFLICTING_REVISION_NAME, REVISION_MISSING,\n CONFIGURATION_MISSING, ASSIGNING_TRAFFIC, UPDATING_INGRESS_TRAFFIC_ALLOWED,\n REVISION_ORG_POLICY_VIOLATION, ENABLING_GCFV2_URI_SUPPORT'\n x-dcl-conflicts:\n - reason\n - domainMappingReason\n - revisionReason\n - executionReason\n enum:\n - INTERNAL_REASON_UNDEFINED\n - CONFLICTING_REVISION_NAME\n - REVISION_MISSING\n - CONFIGURATION_MISSING\n - ASSIGNING_TRAFFIC\n - UPDATING_INGRESS_TRAFFIC_ALLOWED\n - REVISION_ORG_POLICY_VIOLATION\n - ENABLING_GCFV2_URI_SUPPORT\n lastTransitionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastTransitionTime\n description: Last time the condition transitioned from one status to\n another.\n message:\n type: string\n x-dcl-go-name: Message\n description: Human readable message indicating details about the current\n status.\n reason:\n type: string\n x-dcl-go-name: Reason\n x-dcl-go-type: JobTerminalConditionReasonEnum\n description: 'A common (service-level) reason for this condition. 
Possible\n values: COMMON_REASON_UNDEFINED, UNKNOWN, ROUTE_MISSING, REVISION_FAILED,\n PROGRESS_DEADLINE_EXCEEDED, CONTAINER_MISSING, CONTAINER_PERMISSION_DENIED,\n CONTAINER_IMAGE_UNAUTHORIZED, CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED,\n ENCRYPTION_KEY_PERMISSION_DENIED, ENCRYPTION_KEY_CHECK_FAILED, SECRETS_ACCESS_CHECK_FAILED,\n WAITING_FOR_OPERATION, IMMEDIATE_RETRY, POSTPONED_RETRY'\n x-dcl-conflicts:\n - internalReason\n - domainMappingReason\n - revisionReason\n - executionReason\n enum:\n - COMMON_REASON_UNDEFINED\n - UNKNOWN\n - ROUTE_MISSING\n - REVISION_FAILED\n - PROGRESS_DEADLINE_EXCEEDED\n - CONTAINER_MISSING\n - CONTAINER_PERMISSION_DENIED\n - CONTAINER_IMAGE_UNAUTHORIZED\n - CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED\n - ENCRYPTION_KEY_PERMISSION_DENIED\n - ENCRYPTION_KEY_CHECK_FAILED\n - SECRETS_ACCESS_CHECK_FAILED\n - WAITING_FOR_OPERATION\n - IMMEDIATE_RETRY\n - POSTPONED_RETRY\n revisionReason:\n type: string\n x-dcl-go-name: RevisionReason\n x-dcl-go-type: JobTerminalConditionRevisionReasonEnum\n description: 'A reason for the revision condition. 
Possible values:\n REVISION_REASON_UNDEFINED, PENDING, RESERVE, RETIRED, RETIRING, RECREATING,\n HEALTH_CHECK_CONTAINER_ERROR, CUSTOMIZED_PATH_RESPONSE_PENDING, MIN_INSTANCES_NOT_PROVISIONED,\n ACTIVE_REVISION_LIMIT_REACHED, NO_DEPLOYMENT, HEALTH_CHECK_SKIPPED'\n x-dcl-conflicts:\n - reason\n - internalReason\n - domainMappingReason\n - executionReason\n enum:\n - REVISION_REASON_UNDEFINED\n - PENDING\n - RESERVE\n - RETIRED\n - RETIRING\n - RECREATING\n - HEALTH_CHECK_CONTAINER_ERROR\n - CUSTOMIZED_PATH_RESPONSE_PENDING\n - MIN_INSTANCES_NOT_PROVISIONED\n - ACTIVE_REVISION_LIMIT_REACHED\n - NO_DEPLOYMENT\n - HEALTH_CHECK_SKIPPED\n severity:\n type: string\n x-dcl-go-name: Severity\n x-dcl-go-type: JobTerminalConditionSeverityEnum\n description: 'How to interpret failures of this condition, one of Error,\n Warning, Info Possible values: SEVERITY_UNSPECIFIED, ERROR, WARNING,\n INFO'\n enum:\n - SEVERITY_UNSPECIFIED\n - ERROR\n - WARNING\n - INFO\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: JobTerminalConditionStateEnum\n description: 'State of the condition. Possible values: STATE_UNSPECIFIED,\n CONDITION_PENDING, CONDITION_RECONCILING, CONDITION_FAILED, CONDITION_SUCCEEDED'\n enum:\n - STATE_UNSPECIFIED\n - CONDITION_PENDING\n - CONDITION_RECONCILING\n - CONDITION_FAILED\n - CONDITION_SUCCEEDED\n type:\n type: string\n x-dcl-go-name: Type\n description: 'type is used to communicate the status of the reconciliation\n process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting\n Types common to all resources include: * \"Ready\": True when the Resource\n is ready.'\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. 
Server assigned unique identifier for the Execution.\n The value is a UUID4 string and guaranteed to remain unchanged until the\n resource is deleted.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last-modified time.\n x-kubernetes-immutable: true\n") -// 50218 bytes -// MD5: 5c4990075825ac0d623f4248c86316e8 +// 50209 bytes +// MD5: 4f895c8e44197ac3e6821bca8894ccc3 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_schema.go index eb8c3cd0f6..153de657fa 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/job_schema.go @@ -576,7 +576,7 @@ func DCLJobSchema() *dcl.Schema { "secret": &dcl.Property{ Type: "string", GoName: "Secret", - Description: "Required. The name of the secret in Cloud Secret Manager. Format: {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project.", + Description: "Required. The name of the secret in Cloud Secret Manager. Format {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project.", ResourceReferences: []*dcl.PropertyResourceReference{ &dcl.PropertyResourceReference{ Resource: "Secretmanager/Secret", @@ -754,7 +754,7 @@ func DCLJobSchema() *dcl.Schema { "instances": &dcl.Property{ Type: "array", GoName: "Instances", - Description: "The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. 
Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance}", + Description: "The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format {project}:{location}:{instance}", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -822,7 +822,7 @@ func DCLJobSchema() *dcl.Schema { "secret": &dcl.Property{ Type: "string", GoName: "Secret", - Description: "Required. The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.", + Description: "Required. The name of the secret in Cloud Secret Manager. Format {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.", }, }, }, diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.go index 6f75f1ad17..08ef2312be 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.go @@ -1457,10 +1457,9 @@ func (c *Client) GetService(ctx context.Context, r *Service) (*Service, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) 
c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.yaml index 4a36f8b619..93b709d4a7 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service.yaml @@ -380,10 +380,10 @@ components: secret: type: string x-dcl-go-name: Secret - description: 'Required. The name of the secret in - Cloud Secret Manager. Format: {secret_name} if + description: Required. The name of the secret in + Cloud Secret Manager. Format {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} - if the secret is in a different project.' + if the secret is in a different project. x-dcl-references: - resource: Secretmanager/Secret field: name @@ -560,11 +560,11 @@ components: instances: type: array x-dcl-go-name: Instances - description: 'The Cloud SQL instance connection names, as - can be found in https://console.cloud.google.com/sql/instances. + description: The Cloud SQL instance connection names, as can + be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud - Run. Format: {project}:{location}:{instance}' + Run. Format {project}:{location}:{instance} x-dcl-send-empty: true x-dcl-list-type: list items: @@ -660,10 +660,10 @@ components: secret: type: string x-dcl-go-name: Secret - description: 'Required. The name of the secret in Cloud Secret - Manager. Format: {secret} if the secret is in the same project. + description: Required. The name of the secret in Cloud Secret + Manager. 
Format {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in - a different project.' + a different project. x-dcl-references: - resource: Secretmanager/Secret field: name diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_alpha_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_alpha_yaml_embed.go index 4aa1ae2d1b..a970aeb208 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_alpha_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_alpha_yaml_embed.go @@ -17,7 +17,7 @@ package alpha // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/run/alpha/service.yaml -var YAML_service = []byte("info:\n title: Run/Service\n description: The Run Service resource\n x-dcl-struct-name: Service\n x-dcl-has-iam: true\npaths:\n get:\n description: The function used to get information about a Service\n parameters:\n - name: Service\n required: true\n description: A full instance of a Service\n apply:\n description: The function used to apply information about a Service\n parameters:\n - name: Service\n required: true\n description: A full instance of a Service\n delete:\n description: The function used to delete a Service\n parameters:\n - name: Service\n required: true\n description: A full instance of a Service\n deleteAll:\n description: The function used to delete all Service\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Service\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: 
string\ncomponents:\n schemas:\n Service:\n title: Service\n x-dcl-id: projects/{{project}}/locations/{{location}}/services/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: true\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - template\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Unstructured key value map that may be set by external tools\n to store and arbitrary metadata. They are not queryable and should be\n preserved when modifying objects. Cloud Run will populate some annotations\n using ''run.googleapis.com'' or ''serving.knative.dev'' namespaces. This\n field follows Kubernetes annotations'' namespacing, limits, and rules.\n More info: http://kubernetes.io/docs/user-guide/annotations'\n binaryAuthorization:\n type: object\n x-dcl-go-name: BinaryAuthorization\n x-dcl-go-type: ServiceBinaryAuthorization\n description: Settings for the Binary Authorization feature.\n properties:\n breakglassJustification:\n type: string\n x-dcl-go-name: BreakglassJustification\n description: If present, indicates to use Breakglass using this justification.\n For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass\n useDefault:\n type: boolean\n x-dcl-go-name: UseDefault\n description: If True, indicates to use the default project's binary\n authorization policy. If False, binary authorization will be disabled\n client:\n type: string\n x-dcl-go-name: Client\n description: Arbitrary identifier for the API client.\n clientVersion:\n type: string\n x-dcl-go-name: ClientVersion\n description: Arbitrary version identifier for the API client.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The creation time.\n x-kubernetes-immutable: true\n creator:\n type: string\n x-dcl-go-name: Creator\n readOnly: true\n description: Output only. Email address of the authenticated creator.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. The deletion time.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: User-provided description of the Service.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Output only. A system-generated fingerprint for this version\n of the resource. May be used to detect modification conflict during updates.\n x-kubernetes-immutable: true\n expireTime:\n type: string\n format: date-time\n x-dcl-go-name: ExpireTime\n readOnly: true\n description: Output only. For a deleted resource, the time after which it\n will be permamently deleted.\n x-kubernetes-immutable: true\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n readOnly: true\n description: Output only. A number that monotonically increases every time\n the user modifies the desired state.\n x-kubernetes-immutable: true\n ingress:\n type: string\n x-dcl-go-name: Ingress\n x-dcl-go-type: ServiceIngressEnum\n description: Provides the ingress settings for this Service. On output,\n returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED\n if no revision is active.\n enum:\n - INGRESS_TRAFFIC_UNSPECIFIED\n - INGRESS_TRAFFIC_ALL\n - INGRESS_TRAFFIC_INTERNAL_ONLY\n - INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n readOnly: true\n description: Map of string keys and values that can be used to organize\n and categorize objects. 
User-provided labels are shared with Google's\n billing system, so they can be used to filter, or break down billing charges\n by team, component, environment, state, etc. For more information, visit\n https://cloud.google.com/resource-manager/docs/creating-managing-labels\n or https://cloud.google.com/run/docs/configuring/labels Cloud Run will\n populate some labels with 'run.googleapis.com' or 'serving.knative.dev'\n namespaces. Those labels are read-only, and user changes will not be preserved.\n x-kubernetes-immutable: true\n lastModifier:\n type: string\n x-dcl-go-name: LastModifier\n readOnly: true\n description: Output only. Email address of the last authenticated modifier.\n x-kubernetes-immutable: true\n latestCreatedRevision:\n type: string\n x-dcl-go-name: LatestCreatedRevision\n readOnly: true\n description: Output only. Name of the last created revision. See comments\n in `reconciling` for additional information on reconciliation process\n in Cloud Run.\n x-kubernetes-immutable: true\n latestReadyRevision:\n type: string\n x-dcl-go-name: LatestReadyRevision\n readOnly: true\n description: Output only. Name of the latest revision that is serving traffic.\n See comments in `reconciling` for additional information on reconciliation\n process in Cloud Run.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: ServiceLaunchStageEnum\n description: 'The launch stage as defined by [Google Cloud Platform Launch\n Stages](http://cloud.google.com/terms/launch-stages). Cloud Run supports\n `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. 
Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The fully qualified name of this Service, composed\n from CreateServiceRequest.parent and CreateServiceRequest.service_id.\n Format: projects/{project}/locations/{location}/services/{service_id}'\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: 'Output only. Returns true if the Service is currently being\n acted upon by the system to bring it into the desired state. When a new\n Service is created, or an existing one is updated, Cloud Run will asynchronously\n perform all necessary steps to bring the Service to the desired serving\n state. This process is called reconciliation. While reconciliation is\n in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`,\n and `uri` will have transient values that might mismatch the intended\n state: Once reconciliation is over (and this field is false), there are\n two possible outcomes: reconciliation succeeded and the serving state\n matches the Service, or there was an error, and reconciliation failed.\n This state can be found in `terminal_condition.state`. 
If reconciliation\n succeeded, the following fields will match: `traffic` and `traffic_statuses`,\n `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`.\n If reconciliation failed, `traffic_statuses`, `observed_generation`, and\n `latest_ready_revision` will have the state of the last serving revision,\n or empty for newly created Services. Additional information on the failure\n can be found in `terminal_condition` and `conditions`.'\n x-kubernetes-immutable: true\n template:\n type: object\n x-dcl-go-name: Template\n x-dcl-go-type: ServiceTemplate\n description: Required. The template used to create revisions for this Service.\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: KRM-style annotations for the resource.\n containerConcurrency:\n type: integer\n format: int64\n x-dcl-go-name: ContainerConcurrency\n description: Sets the maximum number of requests that each serving instance\n can receive.\n containers:\n type: array\n x-dcl-go-name: Containers\n description: Holds the single container that defines the unit of execution\n for this Revision.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainers\n required:\n - image\n properties:\n args:\n type: array\n x-dcl-go-name: Args\n description: 'Arguments to the entrypoint. The docker image''s\n CMD is used if this is not provided. Variable references $(VAR_NAME)\n are expanded using the container''s environment. If a variable\n cannot be resolved, the reference in the input string will be\n unchanged. The $(VAR_NAME) syntax can be escaped with a double\n $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\n regardless of whether the variable exists or not. 
More info:\n https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n command:\n type: array\n x-dcl-go-name: Command\n description: 'Entrypoint array. Not executed within a shell. The\n docker image''s ENTRYPOINT is used if this is not provided.\n Variable references $(VAR_NAME) are expanded using the container''s\n environment. If a variable cannot be resolved, the reference\n in the input string will be unchanged. The $(VAR_NAME) syntax\n can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references\n will never be expanded, regardless of whether the variable exists\n or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n env:\n type: array\n x-dcl-go-name: Env\n description: List of environment variables to set in the container.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainersEnv\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Name of the environment variable.\n Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.\n value:\n type: string\n x-dcl-go-name: Value\n description: 'Variable references $(VAR_NAME) are expanded\n using the previous defined environment variables in the\n container and any route environment variables. If a variable\n cannot be resolved, the reference in the input string\n will be unchanged. The $(VAR_NAME) syntax can be escaped\n with a double $$, ie: $$(VAR_NAME). Escaped references\n will never be expanded, regardless of whether the variable\n exists or not. 
Defaults to \"\", and the maximum length\n is 32768 bytes.'\n x-dcl-conflicts:\n - valueSource\n valueSource:\n type: object\n x-dcl-go-name: ValueSource\n x-dcl-go-type: ServiceTemplateContainersEnvValueSource\n description: Source for the environment variable's value.\n x-dcl-conflicts:\n - value\n properties:\n secretKeyRef:\n type: object\n x-dcl-go-name: SecretKeyRef\n x-dcl-go-type: ServiceTemplateContainersEnvValueSourceSecretKeyRef\n description: Selects a secret and a specific version\n from Cloud Secret Manager.\n required:\n - secret\n properties:\n secret:\n type: string\n x-dcl-go-name: Secret\n description: 'Required. The name of the secret in\n Cloud Secret Manager. Format: {secret_name} if\n the secret is in the same project. projects/{project}/secrets/{secret_name}\n if the secret is in a different project.'\n x-dcl-references:\n - resource: Secretmanager/Secret\n field: name\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret version.\n Can be 'latest' for the latest value or an integer\n for a specific version.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: name\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Required. URL of the Container image in Google Container\n Registry or Docker More info: https://kubernetes.io/docs/concepts/containers/images'\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the container specified as a DNS_LABEL.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: List of ports to expose from the container. Only\n a single port can be specified. The specified ports must be\n listening on all interfaces (0.0.0.0) within the container to\n be accessible. 
If omitted, a port number will be chosen and\n passed to the container through the PORT environment variable\n for the container to listen on.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainersPorts\n properties:\n containerPort:\n type: integer\n format: int64\n x-dcl-go-name: ContainerPort\n description: Port number the container listens on. This\n must be a valid TCP port number, 0 < container_port <\n 65536.\n name:\n type: string\n x-dcl-go-name: Name\n description: If specified, used to specify which protocol\n to use. Allowed values are \"http1\" and \"h2c\".\n resources:\n type: object\n x-dcl-go-name: Resources\n x-dcl-go-type: ServiceTemplateContainersResources\n description: 'Compute Resource requirements by this container.\n More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'\n properties:\n cpuIdle:\n type: boolean\n x-dcl-go-name: CpuIdle\n description: Determines whether CPU should be throttled or\n not outside of requests.\n limits:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Limits\n description: 'Only memory and CPU are supported. Note: The\n only supported values for CPU are ''1'', ''2'', and ''4''.\n Setting 4 CPU requires at least 2Gi of memory. The values\n of the map is string form of the ''quantity'' k8s type:\n https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go'\n volumeMounts:\n type: array\n x-dcl-go-name: VolumeMounts\n description: Volume to mount into the container's filesystem.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainersVolumeMounts\n required:\n - name\n - mountPath\n properties:\n mountPath:\n type: string\n x-dcl-go-name: MountPath\n description: Required. Path within the container at which\n the volume should be mounted. Must not contain ':'. 
For\n Cloud SQL volumes, it can be left empty, or must otherwise\n be `/cloudsql`. All instances defined in the Volume will\n be available as `/cloudsql/[instance]`. For more information\n on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. This must match the Name of a Volume.\n executionEnvironment:\n type: string\n x-dcl-go-name: ExecutionEnvironment\n x-dcl-go-type: ServiceTemplateExecutionEnvironmentEnum\n description: 'The sandbox environment to host this Revision. Possible\n values: EXECUTION_ENVIRONMENT_UNSPECIFIED, EXECUTION_ENVIRONMENT_GEN1,\n EXECUTION_ENVIRONMENT_GEN2'\n enum:\n - EXECUTION_ENVIRONMENT_UNSPECIFIED\n - EXECUTION_ENVIRONMENT_GEN1\n - EXECUTION_ENVIRONMENT_GEN2\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: KRM-style labels for the resource.\n revision:\n type: string\n x-dcl-go-name: Revision\n description: The unique name for the revision. If this field is omitted,\n it will be automatically generated based on the Service name.\n scaling:\n type: object\n x-dcl-go-name: Scaling\n x-dcl-go-type: ServiceTemplateScaling\n description: Scaling settings for this Revision.\n properties:\n maxInstanceCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxInstanceCount\n description: Maximum number of serving instances that this resource\n should have.\n minInstanceCount:\n type: integer\n format: int64\n x-dcl-go-name: MinInstanceCount\n description: Minimum number of serving instances that this resource\n should have.\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Email address of the IAM service account associated with\n the revision of the service. The service account represents the identity\n of the running revision, and determines what permissions the revision\n has. 
If not provided, the revision will use the project's default\n service account.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n timeout:\n type: string\n x-dcl-go-name: Timeout\n description: Max allowed time for an instance to respond to a request.\n volumes:\n type: array\n x-dcl-go-name: Volumes\n description: A list of Volumes to make available to containers.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateVolumes\n required:\n - name\n properties:\n cloudSqlInstance:\n type: object\n x-dcl-go-name: CloudSqlInstance\n x-dcl-go-type: ServiceTemplateVolumesCloudSqlInstance\n description: For Cloud SQL volumes, contains the specific instances\n that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and Cloud Run.\n x-dcl-conflicts:\n - secret\n properties:\n instances:\n type: array\n x-dcl-go-name: Instances\n description: 'The Cloud SQL instance connection names, as\n can be found in https://console.cloud.google.com/sql/instances.\n Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and Cloud\n Run. Format: {project}:{location}:{instance}'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Sql/Instance\n field: instanceName\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Volume's name.\n secret:\n type: object\n x-dcl-go-name: Secret\n x-dcl-go-type: ServiceTemplateVolumesSecret\n description: 'Secret represents a secret that should populate\n this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'\n x-dcl-conflicts:\n - cloudSqlInstance\n required:\n - secret\n properties:\n defaultMode:\n type: integer\n format: int64\n x-dcl-go-name: DefaultMode\n description: 'Integer representation of mode bits to use on\n created files by default. Must be a value between 0000 and\n 0777 (octal), defaulting to 0644. Directories within the\n path are not affected by this setting. Notes * Internally,\n a umask of 0222 will be applied to any non-zero value. *\n This is an integer representation of the mode bits. So,\n the octal integer value should look exactly as the chmod\n numeric notation with a leading zero. Some examples: for\n chmod 777 (a=rwx), set to 0777 (octal) or 511 (base-10).\n For chmod 640 (u=rw,g=r), set to 0640 (octal) or 416 (base-10).\n For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal) or\n 493 (base-10). * This might be in conflict with other options\n that affect the file mode, like fsGroup, and the result\n can be other mode bits set. This might be in conflict with\n other options that affect the file mode, like fsGroup, and\n as a result, other mode bits could be set.'\n items:\n type: array\n x-dcl-go-name: Items\n description: If unspecified, the volume will expose a file\n whose name is the secret, relative to VolumeMount.mount_path.\n If specified, the key will be used as the version to fetch\n from Cloud Secret Manager and the path will be the name\n of the file exposed in the volume. When items are defined,\n they must specify a path and a version.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateVolumesSecretItems\n required:\n - path\n properties:\n mode:\n type: integer\n format: int64\n x-dcl-go-name: Mode\n description: 'Integer octal mode bits to use on this\n file, must be a value between 01 and 0777 (octal).\n If 0 or not set, the Volume''s default mode will be\n used. 
Notes * Internally, a umask of 0222 will be\n applied to any non-zero value. * This is an integer\n representation of the mode bits. So, the octal integer\n value should look exactly as the chmod numeric notation\n with a leading zero. Some examples: for chmod 777\n (a=rwx), set to 0777 (octal) or 511 (base-10). For\n chmod 640 (u=rw,g=r), set to 0640 (octal) or 416 (base-10).\n For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal)\n or 493 (base-10). * This might be in conflict with\n other options that affect the file mode, like fsGroup,\n and the result can be other mode bits set.'\n path:\n type: string\n x-dcl-go-name: Path\n description: Required. The relative path of the secret\n in the container.\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret version.\n Can be 'latest' for the latest value or an integer\n for a specific version.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: name\n secret:\n type: string\n x-dcl-go-name: Secret\n description: 'Required. The name of the secret in Cloud Secret\n Manager. Format: {secret} if the secret is in the same project.\n projects/{project}/secrets/{secret} if the secret is in\n a different project.'\n x-dcl-references:\n - resource: Secretmanager/Secret\n field: name\n vpcAccess:\n type: object\n x-dcl-go-name: VPCAccess\n x-dcl-go-type: ServiceTemplateVPCAccess\n description: VPC Access configuration to use for this Revision. For\n more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.\n properties:\n connector:\n type: string\n x-dcl-go-name: Connector\n description: 'VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}'\n x-dcl-references:\n - resource: Vpcaccess/Connector\n field: name\n egress:\n type: string\n x-dcl-go-name: Egress\n x-dcl-go-type: ServiceTemplateVPCAccessEgressEnum\n description: 'Traffic VPC egress settings. 
Possible values: VPC_EGRESS_UNSPECIFIED,\n ALL_TRAFFIC, PRIVATE_RANGES_ONLY'\n enum:\n - VPC_EGRESS_UNSPECIFIED\n - ALL_TRAFFIC\n - PRIVATE_RANGES_ONLY\n terminalCondition:\n type: object\n x-dcl-go-name: TerminalCondition\n x-dcl-go-type: ServiceTerminalCondition\n readOnly: true\n description: Output only. The Condition of this Service, containing its\n readiness status, and detailed error information in case it did not reach\n a serving state. See comments in `reconciling` for additional information\n on reconciliation process in Cloud Run.\n properties:\n jobReason:\n type: string\n x-dcl-go-name: JobReason\n x-dcl-go-type: ServiceTerminalConditionJobReasonEnum\n description: 'A reason for the job condition. Possible values: JOB_REASON_UNDEFINED,\n JOB_STATUS_SERVICE_POLLING_ERROR'\n x-dcl-conflicts:\n - reason\n - revisionReason\n enum:\n - JOB_REASON_UNDEFINED\n - JOB_STATUS_SERVICE_POLLING_ERROR\n lastTransitionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastTransitionTime\n description: Last time the condition transitioned from one status to\n another.\n message:\n type: string\n x-dcl-go-name: Message\n description: Human readable message indicating details about the current\n status.\n reason:\n type: string\n x-dcl-go-name: Reason\n x-dcl-go-type: ServiceTerminalConditionReasonEnum\n description: 'A common (service-level) reason for this condition. 
Possible\n values: COMMON_REASON_UNDEFINED, UNKNOWN, REVISION_FAILED, PROGRESS_DEADLINE_EXCEEDED,\n CONTAINER_MISSING, CONTAINER_PERMISSION_DENIED, CONTAINER_IMAGE_UNAUTHORIZED,\n CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED, ENCRYPTION_KEY_PERMISSION_DENIED,\n ENCRYPTION_KEY_CHECK_FAILED, SECRETS_ACCESS_CHECK_FAILED, WAITING_FOR_OPERATION,\n IMMEDIATE_RETRY, POSTPONED_RETRY, INTERNAL'\n x-dcl-conflicts:\n - revisionReason\n - jobReason\n enum:\n - COMMON_REASON_UNDEFINED\n - UNKNOWN\n - REVISION_FAILED\n - PROGRESS_DEADLINE_EXCEEDED\n - CONTAINER_MISSING\n - CONTAINER_PERMISSION_DENIED\n - CONTAINER_IMAGE_UNAUTHORIZED\n - CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED\n - ENCRYPTION_KEY_PERMISSION_DENIED\n - ENCRYPTION_KEY_CHECK_FAILED\n - SECRETS_ACCESS_CHECK_FAILED\n - WAITING_FOR_OPERATION\n - IMMEDIATE_RETRY\n - POSTPONED_RETRY\n - INTERNAL\n revisionReason:\n type: string\n x-dcl-go-name: RevisionReason\n x-dcl-go-type: ServiceTerminalConditionRevisionReasonEnum\n description: 'A reason for the revision condition. 
Possible values:\n REVISION_REASON_UNDEFINED, PENDING, RESERVE, RETIRED, RETIRING, RECREATING,\n HEALTH_CHECK_CONTAINER_ERROR, CUSTOMIZED_PATH_RESPONSE_PENDING, MIN_INSTANCES_NOT_PROVISIONED,\n ACTIVE_REVISION_LIMIT_REACHED, NO_DEPLOYMENT'\n x-dcl-conflicts:\n - reason\n - jobReason\n enum:\n - REVISION_REASON_UNDEFINED\n - PENDING\n - RESERVE\n - RETIRED\n - RETIRING\n - RECREATING\n - HEALTH_CHECK_CONTAINER_ERROR\n - CUSTOMIZED_PATH_RESPONSE_PENDING\n - MIN_INSTANCES_NOT_PROVISIONED\n - ACTIVE_REVISION_LIMIT_REACHED\n - NO_DEPLOYMENT\n severity:\n type: string\n x-dcl-go-name: Severity\n x-dcl-go-type: ServiceTerminalConditionSeverityEnum\n description: 'How to interpret failures of this condition, one of Error,\n Warning, Info Possible values: SEVERITY_UNSPECIFIED, ERROR, WARNING,\n INFO'\n enum:\n - SEVERITY_UNSPECIFIED\n - ERROR\n - WARNING\n - INFO\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: ServiceTerminalConditionStateEnum\n description: 'State of the condition. Possible values: STATE_UNSPECIFIED,\n CONDITION_PENDING, CONDITION_RECONCILING, CONDITION_FAILED, CONDITION_SUCCEEDED'\n enum:\n - STATE_UNSPECIFIED\n - CONDITION_PENDING\n - CONDITION_RECONCILING\n - CONDITION_FAILED\n - CONDITION_SUCCEEDED\n type:\n type: string\n x-dcl-go-name: Type\n description: 'type is used to communicate the status of the reconciliation\n process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting\n Types common to all resources include: * \"Ready\": True when the Resource\n is ready.'\n traffic:\n type: array\n x-dcl-go-name: Traffic\n description: Specifies how to distribute traffic over a collection of Revisions\n belonging to the Service. 
If traffic is empty or not provided, defaults\n to 100% traffic to the latest `Ready` Revision.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTraffic\n properties:\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies percent of the traffic to this Revision. This\n defaults to zero if unspecified. Cloud Run currently requires 100\n percent for a single TrafficTarget entry.\n revision:\n type: string\n x-dcl-go-name: Revision\n description: Revision to which to send this portion of traffic, if\n traffic allocation is by revision.\n tag:\n type: string\n x-dcl-go-name: Tag\n description: Indicates a string to be part of the URI to exclusively\n reference this target.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: ServiceTrafficTypeEnum\n description: 'The allocation type for this traffic target. Possible\n values: TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED, TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST,\n TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'\n enum:\n - TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED\n - TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\n - TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION\n trafficStatuses:\n type: array\n x-dcl-go-name: TrafficStatuses\n readOnly: true\n description: Output only. Detailed status information for corresponding\n traffic targets. 
See comments in `reconciling` for additional information\n on reconciliation process in Cloud Run.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTrafficStatuses\n properties:\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies percent of the traffic to this Revision.\n revision:\n type: string\n x-dcl-go-name: Revision\n description: Revision to which this traffic is sent.\n tag:\n type: string\n x-dcl-go-name: Tag\n description: Indicates the string used in the URI to exclusively reference\n this target.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: ServiceTrafficStatusesTypeEnum\n description: 'The allocation type for this traffic target. Possible\n values: TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED, TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST,\n TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'\n enum:\n - TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED\n - TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\n - TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Displays the target URI.\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Server assigned unique identifier for the trigger.\n The value is a UUID4 string and guaranteed to remain unchanged until the\n resource is deleted.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last-modified time.\n x-kubernetes-immutable: true\n uri:\n type: string\n x-dcl-go-name: Uri\n readOnly: true\n description: Output only. 
The main URI in which this Service is serving\n traffic.\n x-kubernetes-immutable: true\n") +var YAML_service = []byte("info:\n title: Run/Service\n description: The Run Service resource\n x-dcl-struct-name: Service\n x-dcl-has-iam: true\npaths:\n get:\n description: The function used to get information about a Service\n parameters:\n - name: Service\n required: true\n description: A full instance of a Service\n apply:\n description: The function used to apply information about a Service\n parameters:\n - name: Service\n required: true\n description: A full instance of a Service\n delete:\n description: The function used to delete a Service\n parameters:\n - name: Service\n required: true\n description: A full instance of a Service\n deleteAll:\n description: The function used to delete all Service\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Service\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Service:\n title: Service\n x-dcl-id: projects/{{project}}/locations/{{location}}/services/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: true\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - template\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Unstructured key value map that may be set by external tools\n to store and arbitrary metadata. They are not queryable and should be\n preserved when modifying objects. Cloud Run will populate some annotations\n using ''run.googleapis.com'' or ''serving.knative.dev'' namespaces. 
This\n field follows Kubernetes annotations'' namespacing, limits, and rules.\n More info: http://kubernetes.io/docs/user-guide/annotations'\n binaryAuthorization:\n type: object\n x-dcl-go-name: BinaryAuthorization\n x-dcl-go-type: ServiceBinaryAuthorization\n description: Settings for the Binary Authorization feature.\n properties:\n breakglassJustification:\n type: string\n x-dcl-go-name: BreakglassJustification\n description: If present, indicates to use Breakglass using this justification.\n For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass\n useDefault:\n type: boolean\n x-dcl-go-name: UseDefault\n description: If True, indicates to use the default project's binary\n authorization policy. If False, binary authorization will be disabled\n client:\n type: string\n x-dcl-go-name: Client\n description: Arbitrary identifier for the API client.\n clientVersion:\n type: string\n x-dcl-go-name: ClientVersion\n description: Arbitrary version identifier for the API client.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation time.\n x-kubernetes-immutable: true\n creator:\n type: string\n x-dcl-go-name: Creator\n readOnly: true\n description: Output only. Email address of the authenticated creator.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. The deletion time.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: User-provided description of the Service.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Output only. A system-generated fingerprint for this version\n of the resource. 
May be used to detect modification conflict during updates.\n x-kubernetes-immutable: true\n expireTime:\n type: string\n format: date-time\n x-dcl-go-name: ExpireTime\n readOnly: true\n description: Output only. For a deleted resource, the time after which it\n will be permamently deleted.\n x-kubernetes-immutable: true\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n readOnly: true\n description: Output only. A number that monotonically increases every time\n the user modifies the desired state.\n x-kubernetes-immutable: true\n ingress:\n type: string\n x-dcl-go-name: Ingress\n x-dcl-go-type: ServiceIngressEnum\n description: Provides the ingress settings for this Service. On output,\n returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED\n if no revision is active.\n enum:\n - INGRESS_TRAFFIC_UNSPECIFIED\n - INGRESS_TRAFFIC_ALL\n - INGRESS_TRAFFIC_INTERNAL_ONLY\n - INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n readOnly: true\n description: Map of string keys and values that can be used to organize\n and categorize objects. User-provided labels are shared with Google's\n billing system, so they can be used to filter, or break down billing charges\n by team, component, environment, state, etc. For more information, visit\n https://cloud.google.com/resource-manager/docs/creating-managing-labels\n or https://cloud.google.com/run/docs/configuring/labels Cloud Run will\n populate some labels with 'run.googleapis.com' or 'serving.knative.dev'\n namespaces. Those labels are read-only, and user changes will not be preserved.\n x-kubernetes-immutable: true\n lastModifier:\n type: string\n x-dcl-go-name: LastModifier\n readOnly: true\n description: Output only. 
Email address of the last authenticated modifier.\n x-kubernetes-immutable: true\n latestCreatedRevision:\n type: string\n x-dcl-go-name: LatestCreatedRevision\n readOnly: true\n description: Output only. Name of the last created revision. See comments\n in `reconciling` for additional information on reconciliation process\n in Cloud Run.\n x-kubernetes-immutable: true\n latestReadyRevision:\n type: string\n x-dcl-go-name: LatestReadyRevision\n readOnly: true\n description: Output only. Name of the latest revision that is serving traffic.\n See comments in `reconciling` for additional information on reconciliation\n process in Cloud Run.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: ServiceLaunchStageEnum\n description: 'The launch stage as defined by [Google Cloud Platform Launch\n Stages](http://cloud.google.com/terms/launch-stages). Cloud Run supports\n `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The fully qualified name of this Service, composed\n from CreateServiceRequest.parent and CreateServiceRequest.service_id.\n Format: projects/{project}/locations/{location}/services/{service_id}'\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: 'Output only. 
Returns true if the Service is currently being\n acted upon by the system to bring it into the desired state. When a new\n Service is created, or an existing one is updated, Cloud Run will asynchronously\n perform all necessary steps to bring the Service to the desired serving\n state. This process is called reconciliation. While reconciliation is\n in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`,\n and `uri` will have transient values that might mismatch the intended\n state: Once reconciliation is over (and this field is false), there are\n two possible outcomes: reconciliation succeeded and the serving state\n matches the Service, or there was an error, and reconciliation failed.\n This state can be found in `terminal_condition.state`. If reconciliation\n succeeded, the following fields will match: `traffic` and `traffic_statuses`,\n `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`.\n If reconciliation failed, `traffic_statuses`, `observed_generation`, and\n `latest_ready_revision` will have the state of the last serving revision,\n or empty for newly created Services. Additional information on the failure\n can be found in `terminal_condition` and `conditions`.'\n x-kubernetes-immutable: true\n template:\n type: object\n x-dcl-go-name: Template\n x-dcl-go-type: ServiceTemplate\n description: Required. 
The template used to create revisions for this Service.\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: KRM-style annotations for the resource.\n containerConcurrency:\n type: integer\n format: int64\n x-dcl-go-name: ContainerConcurrency\n description: Sets the maximum number of requests that each serving instance\n can receive.\n containers:\n type: array\n x-dcl-go-name: Containers\n description: Holds the single container that defines the unit of execution\n for this Revision.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainers\n required:\n - image\n properties:\n args:\n type: array\n x-dcl-go-name: Args\n description: 'Arguments to the entrypoint. The docker image''s\n CMD is used if this is not provided. Variable references $(VAR_NAME)\n are expanded using the container''s environment. If a variable\n cannot be resolved, the reference in the input string will be\n unchanged. The $(VAR_NAME) syntax can be escaped with a double\n $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\n regardless of whether the variable exists or not. More info:\n https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n command:\n type: array\n x-dcl-go-name: Command\n description: 'Entrypoint array. Not executed within a shell. The\n docker image''s ENTRYPOINT is used if this is not provided.\n Variable references $(VAR_NAME) are expanded using the container''s\n environment. If a variable cannot be resolved, the reference\n in the input string will be unchanged. The $(VAR_NAME) syntax\n can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references\n will never be expanded, regardless of whether the variable exists\n or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n env:\n type: array\n x-dcl-go-name: Env\n description: List of environment variables to set in the container.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainersEnv\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Name of the environment variable.\n Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.\n value:\n type: string\n x-dcl-go-name: Value\n description: 'Variable references $(VAR_NAME) are expanded\n using the previous defined environment variables in the\n container and any route environment variables. If a variable\n cannot be resolved, the reference in the input string\n will be unchanged. The $(VAR_NAME) syntax can be escaped\n with a double $$, ie: $$(VAR_NAME). Escaped references\n will never be expanded, regardless of whether the variable\n exists or not. Defaults to \"\", and the maximum length\n is 32768 bytes.'\n x-dcl-conflicts:\n - valueSource\n valueSource:\n type: object\n x-dcl-go-name: ValueSource\n x-dcl-go-type: ServiceTemplateContainersEnvValueSource\n description: Source for the environment variable's value.\n x-dcl-conflicts:\n - value\n properties:\n secretKeyRef:\n type: object\n x-dcl-go-name: SecretKeyRef\n x-dcl-go-type: ServiceTemplateContainersEnvValueSourceSecretKeyRef\n description: Selects a secret and a specific version\n from Cloud Secret Manager.\n required:\n - secret\n properties:\n secret:\n type: string\n x-dcl-go-name: Secret\n description: Required. The name of the secret in\n Cloud Secret Manager. Format {secret_name} if\n the secret is in the same project. 
projects/{project}/secrets/{secret_name}\n if the secret is in a different project.\n x-dcl-references:\n - resource: Secretmanager/Secret\n field: name\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret version.\n Can be 'latest' for the latest value or an integer\n for a specific version.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: name\n image:\n type: string\n x-dcl-go-name: Image\n description: 'Required. URL of the Container image in Google Container\n Registry or Docker More info: https://kubernetes.io/docs/concepts/containers/images'\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the container specified as a DNS_LABEL.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: List of ports to expose from the container. Only\n a single port can be specified. The specified ports must be\n listening on all interfaces (0.0.0.0) within the container to\n be accessible. If omitted, a port number will be chosen and\n passed to the container through the PORT environment variable\n for the container to listen on.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainersPorts\n properties:\n containerPort:\n type: integer\n format: int64\n x-dcl-go-name: ContainerPort\n description: Port number the container listens on. This\n must be a valid TCP port number, 0 < container_port <\n 65536.\n name:\n type: string\n x-dcl-go-name: Name\n description: If specified, used to specify which protocol\n to use. 
Allowed values are \"http1\" and \"h2c\".\n resources:\n type: object\n x-dcl-go-name: Resources\n x-dcl-go-type: ServiceTemplateContainersResources\n description: 'Compute Resource requirements by this container.\n More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'\n properties:\n cpuIdle:\n type: boolean\n x-dcl-go-name: CpuIdle\n description: Determines whether CPU should be throttled or\n not outside of requests.\n limits:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Limits\n description: 'Only memory and CPU are supported. Note: The\n only supported values for CPU are ''1'', ''2'', and ''4''.\n Setting 4 CPU requires at least 2Gi of memory. The values\n of the map is string form of the ''quantity'' k8s type:\n https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go'\n volumeMounts:\n type: array\n x-dcl-go-name: VolumeMounts\n description: Volume to mount into the container's filesystem.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateContainersVolumeMounts\n required:\n - name\n - mountPath\n properties:\n mountPath:\n type: string\n x-dcl-go-name: MountPath\n description: Required. Path within the container at which\n the volume should be mounted. Must not contain ':'. For\n Cloud SQL volumes, it can be left empty, or must otherwise\n be `/cloudsql`. All instances defined in the Volume will\n be available as `/cloudsql/[instance]`. For more information\n on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. This must match the Name of a Volume.\n executionEnvironment:\n type: string\n x-dcl-go-name: ExecutionEnvironment\n x-dcl-go-type: ServiceTemplateExecutionEnvironmentEnum\n description: 'The sandbox environment to host this Revision. 
Possible\n values: EXECUTION_ENVIRONMENT_UNSPECIFIED, EXECUTION_ENVIRONMENT_GEN1,\n EXECUTION_ENVIRONMENT_GEN2'\n enum:\n - EXECUTION_ENVIRONMENT_UNSPECIFIED\n - EXECUTION_ENVIRONMENT_GEN1\n - EXECUTION_ENVIRONMENT_GEN2\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: KRM-style labels for the resource.\n revision:\n type: string\n x-dcl-go-name: Revision\n description: The unique name for the revision. If this field is omitted,\n it will be automatically generated based on the Service name.\n scaling:\n type: object\n x-dcl-go-name: Scaling\n x-dcl-go-type: ServiceTemplateScaling\n description: Scaling settings for this Revision.\n properties:\n maxInstanceCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxInstanceCount\n description: Maximum number of serving instances that this resource\n should have.\n minInstanceCount:\n type: integer\n format: int64\n x-dcl-go-name: MinInstanceCount\n description: Minimum number of serving instances that this resource\n should have.\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Email address of the IAM service account associated with\n the revision of the service. The service account represents the identity\n of the running revision, and determines what permissions the revision\n has. 
If not provided, the revision will use the project's default\n service account.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n timeout:\n type: string\n x-dcl-go-name: Timeout\n description: Max allowed time for an instance to respond to a request.\n volumes:\n type: array\n x-dcl-go-name: Volumes\n description: A list of Volumes to make available to containers.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateVolumes\n required:\n - name\n properties:\n cloudSqlInstance:\n type: object\n x-dcl-go-name: CloudSqlInstance\n x-dcl-go-type: ServiceTemplateVolumesCloudSqlInstance\n description: For Cloud SQL volumes, contains the specific instances\n that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and Cloud Run.\n x-dcl-conflicts:\n - secret\n properties:\n instances:\n type: array\n x-dcl-go-name: Instances\n description: The Cloud SQL instance connection names, as can\n be found in https://console.cloud.google.com/sql/instances.\n Visit https://cloud.google.com/sql/docs/mysql/connect-run\n for more information on how to connect Cloud SQL and Cloud\n Run. Format {project}:{location}:{instance}\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Sql/Instance\n field: instanceName\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Volume's name.\n secret:\n type: object\n x-dcl-go-name: Secret\n x-dcl-go-type: ServiceTemplateVolumesSecret\n description: 'Secret represents a secret that should populate\n this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'\n x-dcl-conflicts:\n - cloudSqlInstance\n required:\n - secret\n properties:\n defaultMode:\n type: integer\n format: int64\n x-dcl-go-name: DefaultMode\n description: 'Integer representation of mode bits to use on\n created files by default. Must be a value between 0000 and\n 0777 (octal), defaulting to 0644. Directories within the\n path are not affected by this setting. Notes * Internally,\n a umask of 0222 will be applied to any non-zero value. *\n This is an integer representation of the mode bits. So,\n the octal integer value should look exactly as the chmod\n numeric notation with a leading zero. Some examples: for\n chmod 777 (a=rwx), set to 0777 (octal) or 511 (base-10).\n For chmod 640 (u=rw,g=r), set to 0640 (octal) or 416 (base-10).\n For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal) or\n 493 (base-10). * This might be in conflict with other options\n that affect the file mode, like fsGroup, and the result\n can be other mode bits set. This might be in conflict with\n other options that affect the file mode, like fsGroup, and\n as a result, other mode bits could be set.'\n items:\n type: array\n x-dcl-go-name: Items\n description: If unspecified, the volume will expose a file\n whose name is the secret, relative to VolumeMount.mount_path.\n If specified, the key will be used as the version to fetch\n from Cloud Secret Manager and the path will be the name\n of the file exposed in the volume. When items are defined,\n they must specify a path and a version.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTemplateVolumesSecretItems\n required:\n - path\n properties:\n mode:\n type: integer\n format: int64\n x-dcl-go-name: Mode\n description: 'Integer octal mode bits to use on this\n file, must be a value between 01 and 0777 (octal).\n If 0 or not set, the Volume''s default mode will be\n used. 
Notes * Internally, a umask of 0222 will be\n applied to any non-zero value. * This is an integer\n representation of the mode bits. So, the octal integer\n value should look exactly as the chmod numeric notation\n with a leading zero. Some examples: for chmod 777\n (a=rwx), set to 0777 (octal) or 511 (base-10). For\n chmod 640 (u=rw,g=r), set to 0640 (octal) or 416 (base-10).\n For chmod 755 (u=rwx,g=rx,o=rx), set to 0755 (octal)\n or 493 (base-10). * This might be in conflict with\n other options that affect the file mode, like fsGroup,\n and the result can be other mode bits set.'\n path:\n type: string\n x-dcl-go-name: Path\n description: Required. The relative path of the secret\n in the container.\n version:\n type: string\n x-dcl-go-name: Version\n description: The Cloud Secret Manager secret version.\n Can be 'latest' for the latest value or an integer\n for a specific version.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: name\n secret:\n type: string\n x-dcl-go-name: Secret\n description: Required. The name of the secret in Cloud Secret\n Manager. Format {secret} if the secret is in the same project.\n projects/{project}/secrets/{secret} if the secret is in\n a different project.\n x-dcl-references:\n - resource: Secretmanager/Secret\n field: name\n vpcAccess:\n type: object\n x-dcl-go-name: VPCAccess\n x-dcl-go-type: ServiceTemplateVPCAccess\n description: VPC Access configuration to use for this Revision. For\n more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.\n properties:\n connector:\n type: string\n x-dcl-go-name: Connector\n description: 'VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}'\n x-dcl-references:\n - resource: Vpcaccess/Connector\n field: name\n egress:\n type: string\n x-dcl-go-name: Egress\n x-dcl-go-type: ServiceTemplateVPCAccessEgressEnum\n description: 'Traffic VPC egress settings. 
Possible values: VPC_EGRESS_UNSPECIFIED,\n ALL_TRAFFIC, PRIVATE_RANGES_ONLY'\n enum:\n - VPC_EGRESS_UNSPECIFIED\n - ALL_TRAFFIC\n - PRIVATE_RANGES_ONLY\n terminalCondition:\n type: object\n x-dcl-go-name: TerminalCondition\n x-dcl-go-type: ServiceTerminalCondition\n readOnly: true\n description: Output only. The Condition of this Service, containing its\n readiness status, and detailed error information in case it did not reach\n a serving state. See comments in `reconciling` for additional information\n on reconciliation process in Cloud Run.\n properties:\n jobReason:\n type: string\n x-dcl-go-name: JobReason\n x-dcl-go-type: ServiceTerminalConditionJobReasonEnum\n description: 'A reason for the job condition. Possible values: JOB_REASON_UNDEFINED,\n JOB_STATUS_SERVICE_POLLING_ERROR'\n x-dcl-conflicts:\n - reason\n - revisionReason\n enum:\n - JOB_REASON_UNDEFINED\n - JOB_STATUS_SERVICE_POLLING_ERROR\n lastTransitionTime:\n type: string\n format: date-time\n x-dcl-go-name: LastTransitionTime\n description: Last time the condition transitioned from one status to\n another.\n message:\n type: string\n x-dcl-go-name: Message\n description: Human readable message indicating details about the current\n status.\n reason:\n type: string\n x-dcl-go-name: Reason\n x-dcl-go-type: ServiceTerminalConditionReasonEnum\n description: 'A common (service-level) reason for this condition. 
Possible\n values: COMMON_REASON_UNDEFINED, UNKNOWN, REVISION_FAILED, PROGRESS_DEADLINE_EXCEEDED,\n CONTAINER_MISSING, CONTAINER_PERMISSION_DENIED, CONTAINER_IMAGE_UNAUTHORIZED,\n CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED, ENCRYPTION_KEY_PERMISSION_DENIED,\n ENCRYPTION_KEY_CHECK_FAILED, SECRETS_ACCESS_CHECK_FAILED, WAITING_FOR_OPERATION,\n IMMEDIATE_RETRY, POSTPONED_RETRY, INTERNAL'\n x-dcl-conflicts:\n - revisionReason\n - jobReason\n enum:\n - COMMON_REASON_UNDEFINED\n - UNKNOWN\n - REVISION_FAILED\n - PROGRESS_DEADLINE_EXCEEDED\n - CONTAINER_MISSING\n - CONTAINER_PERMISSION_DENIED\n - CONTAINER_IMAGE_UNAUTHORIZED\n - CONTAINER_IMAGE_AUTHORIZATION_CHECK_FAILED\n - ENCRYPTION_KEY_PERMISSION_DENIED\n - ENCRYPTION_KEY_CHECK_FAILED\n - SECRETS_ACCESS_CHECK_FAILED\n - WAITING_FOR_OPERATION\n - IMMEDIATE_RETRY\n - POSTPONED_RETRY\n - INTERNAL\n revisionReason:\n type: string\n x-dcl-go-name: RevisionReason\n x-dcl-go-type: ServiceTerminalConditionRevisionReasonEnum\n description: 'A reason for the revision condition. 
Possible values:\n REVISION_REASON_UNDEFINED, PENDING, RESERVE, RETIRED, RETIRING, RECREATING,\n HEALTH_CHECK_CONTAINER_ERROR, CUSTOMIZED_PATH_RESPONSE_PENDING, MIN_INSTANCES_NOT_PROVISIONED,\n ACTIVE_REVISION_LIMIT_REACHED, NO_DEPLOYMENT'\n x-dcl-conflicts:\n - reason\n - jobReason\n enum:\n - REVISION_REASON_UNDEFINED\n - PENDING\n - RESERVE\n - RETIRED\n - RETIRING\n - RECREATING\n - HEALTH_CHECK_CONTAINER_ERROR\n - CUSTOMIZED_PATH_RESPONSE_PENDING\n - MIN_INSTANCES_NOT_PROVISIONED\n - ACTIVE_REVISION_LIMIT_REACHED\n - NO_DEPLOYMENT\n severity:\n type: string\n x-dcl-go-name: Severity\n x-dcl-go-type: ServiceTerminalConditionSeverityEnum\n description: 'How to interpret failures of this condition, one of Error,\n Warning, Info Possible values: SEVERITY_UNSPECIFIED, ERROR, WARNING,\n INFO'\n enum:\n - SEVERITY_UNSPECIFIED\n - ERROR\n - WARNING\n - INFO\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: ServiceTerminalConditionStateEnum\n description: 'State of the condition. Possible values: STATE_UNSPECIFIED,\n CONDITION_PENDING, CONDITION_RECONCILING, CONDITION_FAILED, CONDITION_SUCCEEDED'\n enum:\n - STATE_UNSPECIFIED\n - CONDITION_PENDING\n - CONDITION_RECONCILING\n - CONDITION_FAILED\n - CONDITION_SUCCEEDED\n type:\n type: string\n x-dcl-go-name: Type\n description: 'type is used to communicate the status of the reconciliation\n process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting\n Types common to all resources include: * \"Ready\": True when the Resource\n is ready.'\n traffic:\n type: array\n x-dcl-go-name: Traffic\n description: Specifies how to distribute traffic over a collection of Revisions\n belonging to the Service. 
If traffic is empty or not provided, defaults\n to 100% traffic to the latest `Ready` Revision.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTraffic\n properties:\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies percent of the traffic to this Revision. This\n defaults to zero if unspecified. Cloud Run currently requires 100\n percent for a single TrafficTarget entry.\n revision:\n type: string\n x-dcl-go-name: Revision\n description: Revision to which to send this portion of traffic, if\n traffic allocation is by revision.\n tag:\n type: string\n x-dcl-go-name: Tag\n description: Indicates a string to be part of the URI to exclusively\n reference this target.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: ServiceTrafficTypeEnum\n description: 'The allocation type for this traffic target. Possible\n values: TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED, TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST,\n TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'\n enum:\n - TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED\n - TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\n - TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION\n trafficStatuses:\n type: array\n x-dcl-go-name: TrafficStatuses\n readOnly: true\n description: Output only. Detailed status information for corresponding\n traffic targets. 
See comments in `reconciling` for additional information\n on reconciliation process in Cloud Run.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ServiceTrafficStatuses\n properties:\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies percent of the traffic to this Revision.\n revision:\n type: string\n x-dcl-go-name: Revision\n description: Revision to which this traffic is sent.\n tag:\n type: string\n x-dcl-go-name: Tag\n description: Indicates the string used in the URI to exclusively reference\n this target.\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: ServiceTrafficStatusesTypeEnum\n description: 'The allocation type for this traffic target. Possible\n values: TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED, TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST,\n TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'\n enum:\n - TRAFFIC_TARGET_ALLOCATION_TYPE_UNSPECIFIED\n - TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\n - TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Displays the target URI.\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Server assigned unique identifier for the trigger.\n The value is a UUID4 string and guaranteed to remain unchanged until the\n resource is deleted.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last-modified time.\n x-kubernetes-immutable: true\n uri:\n type: string\n x-dcl-go-name: Uri\n readOnly: true\n description: Output only. 
The main URI in which this Service is serving\n traffic.\n x-kubernetes-immutable: true\n") -// 43327 bytes -// MD5: 877305590f70f069fac55caa15b0ad1a +// 43318 bytes +// MD5: 477a34fec24cff92cb5c24e3d85ccc91 diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_schema.go index d903e63889..524f64becf 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/run/alpha/service_schema.go @@ -391,7 +391,7 @@ func DCLServiceSchema() *dcl.Schema { "secret": &dcl.Property{ Type: "string", GoName: "Secret", - Description: "Required. The name of the secret in Cloud Secret Manager. Format: {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project.", + Description: "Required. The name of the secret in Cloud Secret Manager. Format {secret_name} if the secret is in the same project. projects/{project}/secrets/{secret_name} if the secret is in a different project.", ResourceReferences: []*dcl.PropertyResourceReference{ &dcl.PropertyResourceReference{ Resource: "Secretmanager/Secret", @@ -587,7 +587,7 @@ func DCLServiceSchema() *dcl.Schema { "instances": &dcl.Property{ Type: "array", GoName: "Instances", - Description: "The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance}", + Description: "The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. 
Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format {project}:{location}:{instance}", SendEmpty: true, ListType: "list", Items: &dcl.Property{ @@ -667,7 +667,7 @@ func DCLServiceSchema() *dcl.Schema { "secret": &dcl.Property{ Type: "string", GoName: "Secret", - Description: "Required. The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.", + Description: "Required. The name of the secret in Cloud Secret Manager. Format {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.", ResourceReferences: []*dcl.PropertyResourceReference{ &dcl.PropertyResourceReference{ Resource: "Secretmanager/Secret", diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/storage/bucket.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/storage/bucket.go index 78716c15bb..c6ab6d671e 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/storage/bucket.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/storage/bucket.go @@ -637,9 +637,8 @@ func (c *Client) GetBucket(ctx context.Context, r *Bucket) (*Bucket, error) { if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Name = nr.Name + result.Project = r.Project + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.go 
b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.go index 80b1f51e5b..7ad71aad22 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.go @@ -239,10 +239,9 @@ func (c *Client) GetConnector(ctx context.Context, r *Connector) (*Connector, er if err != nil { return nil, err } - nr := r.urlNormalized() - result.Project = nr.Project - result.Location = nr.Location - result.Name = nr.Name + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.yaml b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.yaml index b208beabab..5c03cba6cd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.yaml +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector.yaml @@ -176,9 +176,9 @@ components: name: type: string x-dcl-go-name: Name - description: 'Subnet name (relative, not fully qualified). E.g. if the + description: Subnet name (relative, not fully qualified). E.g. 
if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} - the correct input for this field would be: {subnetName}' + the correct input for this field would be {subnetName} x-kubernetes-immutable: true x-dcl-references: - resource: Compute/Subnetwork diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_beta_yaml_embed.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_beta_yaml_embed.go index c80d04ca34..fcc583eefd 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_beta_yaml_embed.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_beta_yaml_embed.go @@ -17,7 +17,7 @@ package beta // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/vpcaccess/beta/connector.yaml -var YAML_connector = []byte("info:\n title: VPCAccess/Connector\n description: The VPCAccess Connector resource\n x-dcl-struct-name: Connector\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Connector\n parameters:\n - name: Connector\n required: true\n description: A full instance of a Connector\n apply:\n description: The function used to apply information about a Connector\n parameters:\n - name: Connector\n required: true\n description: A full instance of a Connector\n delete:\n description: The function used to delete a Connector\n parameters:\n - name: Connector\n required: true\n description: A full instance of a Connector\n deleteAll:\n description: The function used to delete all Connector\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to 
list information about many Connector\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Connector:\n title: Connector\n x-dcl-id: projects/{{project}}/locations/{{location}}/connectors/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n connectedProjects:\n type: array\n x-dcl-go-name: ConnectedProjects\n readOnly: true\n description: Output only. List of projects using the connector.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n ipCidrRange:\n type: string\n x-dcl-go-name: IPCidrRange\n description: 'The range of internal addresses that follows RFC 4632 notation.\n Example: `10.132.0.0/28`.'\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: Machine type of VM Instance underlying connector. Default is\n e2-micro\n x-kubernetes-immutable: true\n maxInstances:\n type: integer\n format: int64\n x-dcl-go-name: MaxInstances\n description: Maximum value of instances in autoscaling group underlying\n the connector.\n x-kubernetes-immutable: true\n maxThroughput:\n type: integer\n format: int64\n x-dcl-go-name: MaxThroughput\n description: Maximum throughput of the connector in Mbps. 
Default is 200,\n max is 1000.\n x-kubernetes-immutable: true\n minInstances:\n type: integer\n format: int64\n x-dcl-go-name: MinInstances\n description: Minimum value of instances in autoscaling group underlying\n the connector.\n x-kubernetes-immutable: true\n minThroughput:\n type: integer\n format: int64\n x-dcl-go-name: MinThroughput\n description: Minimum throughput of the connector in Mbps. Default and min\n is 200.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name in the format `projects/*/locations/*/connectors/*`.\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: Name of a VPC network.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: name\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: ConnectorStateEnum\n readOnly: true\n description: 'Output only. State of the VPC access connector. Possible values:\n STATE_UNSPECIFIED, READY, CREATING, DELETING, ERROR, UPDATING'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - READY\n - CREATING\n - DELETING\n - ERROR\n - UPDATING\n subnet:\n type: object\n x-dcl-go-name: Subnet\n x-dcl-go-type: ConnectorSubnet\n description: The subnet in which to house the VPC Access Connector.\n x-kubernetes-immutable: true\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Subnet name (relative, not fully qualified). E.g. 
if the\n full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName}\n the correct input for this field would be: {subnetName}'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Subnetwork\n field: name\n parent: true\n projectId:\n type: string\n x-dcl-go-name: ProjectId\n description: Project in which the subnet exists. If not set, this project\n is assumed to be the project for which the connector create request\n was issued.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n") +var YAML_connector = []byte("info:\n title: VPCAccess/Connector\n description: The VPCAccess Connector resource\n x-dcl-struct-name: Connector\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Connector\n parameters:\n - name: Connector\n required: true\n description: A full instance of a Connector\n apply:\n description: The function used to apply information about a Connector\n parameters:\n - name: Connector\n required: true\n description: A full instance of a Connector\n delete:\n description: The function used to delete a Connector\n parameters:\n - name: Connector\n required: true\n description: A full instance of a Connector\n deleteAll:\n description: The function used to delete all Connector\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Connector\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Connector:\n title: Connector\n x-dcl-id: projects/{{project}}/locations/{{location}}/connectors/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n 
x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n connectedProjects:\n type: array\n x-dcl-go-name: ConnectedProjects\n readOnly: true\n description: Output only. List of projects using the connector.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n ipCidrRange:\n type: string\n x-dcl-go-name: IPCidrRange\n description: 'The range of internal addresses that follows RFC 4632 notation.\n Example: `10.132.0.0/28`.'\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n machineType:\n type: string\n x-dcl-go-name: MachineType\n description: Machine type of VM Instance underlying connector. Default is\n e2-micro\n x-kubernetes-immutable: true\n maxInstances:\n type: integer\n format: int64\n x-dcl-go-name: MaxInstances\n description: Maximum value of instances in autoscaling group underlying\n the connector.\n x-kubernetes-immutable: true\n maxThroughput:\n type: integer\n format: int64\n x-dcl-go-name: MaxThroughput\n description: Maximum throughput of the connector in Mbps. Default is 200,\n max is 1000.\n x-kubernetes-immutable: true\n minInstances:\n type: integer\n format: int64\n x-dcl-go-name: MinInstances\n description: Minimum value of instances in autoscaling group underlying\n the connector.\n x-kubernetes-immutable: true\n minThroughput:\n type: integer\n format: int64\n x-dcl-go-name: MinThroughput\n description: Minimum throughput of the connector in Mbps. 
Default and min\n is 200.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name in the format `projects/*/locations/*/connectors/*`.\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: Name of a VPC network.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Network\n field: name\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: ConnectorStateEnum\n readOnly: true\n description: 'Output only. State of the VPC access connector. Possible values:\n STATE_UNSPECIFIED, READY, CREATING, DELETING, ERROR, UPDATING'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - READY\n - CREATING\n - DELETING\n - ERROR\n - UPDATING\n subnet:\n type: object\n x-dcl-go-name: Subnet\n x-dcl-go-type: ConnectorSubnet\n description: The subnet in which to house the VPC Access Connector.\n x-kubernetes-immutable: true\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Subnet name (relative, not fully qualified). E.g. if the\n full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName}\n the correct input for this field would be {subnetName}\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/Subnetwork\n field: name\n parent: true\n projectId:\n type: string\n x-dcl-go-name: ProjectId\n description: Project in which the subnet exists. 
If not set, this project\n is assumed to be the project for which the connector create request\n was issued.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n") -// 6096 bytes -// MD5: 3e62ac7735b8c95f598f194e1b987574 +// 6093 bytes +// MD5: 8053a113f347496e5084a618bc280ddd diff --git a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_schema.go b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_schema.go index 7a982cce55..6a1967d20c 100644 --- a/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_schema.go +++ b/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vpcaccess/beta/connector_schema.go @@ -224,7 +224,7 @@ func DCLConnectorSchema() *dcl.Schema { "name": &dcl.Property{ Type: "string", GoName: "Name", - Description: "Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be: {subnetName}", + Description: "Subnet name (relative, not fully qualified). E.g. 
if the full subnet selfLink is https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be {subnetName}", Immutable: true, ResourceReferences: []*dcl.PropertyResourceReference{ &dcl.PropertyResourceReference{ diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 5c751f2158..5152bf59bf 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -78,7 +78,7 @@ notice("Don't forget this...") ### Custom fprint functions (FprintFunc) ```go -blue := color.New(FgBlue).FprintfFunc() +blue := color.New(color.FgBlue).FprintfFunc() blue(myWriter, "important notice: %s", stars) // Mix up with multiple attributes diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index a16f5cd572..21a17c5af3 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -17,7 +17,7 @@ JSON output mode for production. ## Stability Note -This library has reached 1.0 stability. It's API can be considered solidified +This library has reached 1.0 stability. Its API can be considered solidified and promised through future versions. ## Installation and Docs diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 46ee09fc0c..39391f24fe 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -3,8 +3,9 @@ `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and -[Vault](https://www.vaultproject.io). 
+[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), and +[Boundary](https://www.boundaryproject.io). While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. Plugins over a real network are not supported diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 67dca88357..e0bee88a1d 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -574,6 +574,8 @@ func (c *Client) Start() (addr net.Addr, err error) { c.config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, ServerName: "localhost", } } @@ -629,17 +631,19 @@ func (c *Client) Start() (addr net.Addr, err error) { // Wait for the command to end. err := cmd.Wait() - debugMsgArgs := []interface{}{ + msgArgs := []interface{}{ "path", path, "pid", pid, } if err != nil { - debugMsgArgs = append(debugMsgArgs, + msgArgs = append(msgArgs, []interface{}{"error", err.Error()}...) + c.logger.Error("plugin process exited", msgArgs...) + } else { + // Log and make sure to flush the logs right away + c.logger.Info("plugin process exited", msgArgs...) } - // Log and make sure to flush the logs write away - c.logger.Debug("plugin process exited", debugMsgArgs...) os.Stderr.Sync() // Set that we exited, which takes a lock @@ -774,7 +778,7 @@ func (c *Client) Start() (addr net.Addr, err error) { } // loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA for the client TLSConfig. +// server, and load it as the RootCA and ClientCA for the client TLSConfig. 
func (c *Client) loadServerCert(cert string) error { certPool := x509.NewCertPool() @@ -791,6 +795,7 @@ func (c *Client) loadServerCert(cert string) error { certPool.AddCert(x509Cert) c.config.TLSConfig.RootCAs = certPool + c.config.TLSConfig.ClientCAs = certPool return nil } diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go index 70ba546bf6..185957f8d1 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_posix.go +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 5bb18dd5db..449ba6cc1e 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -45,7 +45,11 @@ func (s *RPCServer) Serve(lis net.Listener) { for { conn, err := lis.Accept() if err != nil { - log.Printf("[ERR] plugin: plugin server: %s", err) + severity := "ERR" + if errors.Is(err, net.ErrClosed) { + severity = "DEBUG" + } + log.Printf("[%s] plugin: plugin server: %s", severity, err) return } diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 7a58cc3919..e134999103 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -304,13 +304,13 @@ func Serve(opts *ServeConfig) { certPEM, keyPEM, err := generateCert() if err != nil { - logger.Error("failed to generate client certificate", "error", err) + logger.Error("failed to generate server certificate", "error", err) panic(err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - logger.Error("failed to parse client certificate", "error", err) + logger.Error("failed to parse server certificate", "error", err) panic(err) } @@ -319,6 +319,8 @@ func Serve(opts *ServeConfig) { 
ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: clientCertPool, MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", } // We send back the raw leaf cert data for the client rather than the diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md index 2020c47274..5f16dd140c 100644 --- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -1,4 +1,17 @@ -# 1.4.0 (January 5, 2021) +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) FEATURES: diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 851a337beb..4d25050903 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,5 @@ # Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) [![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 
1d88090281..da5d1aca14 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -163,6 +163,12 @@ func (c *Constraint) Check(v *Version) bool { return c.f(v, c.check) } +// Prerelease returns true if the version underlying this constraint +// contains a prerelease field. +func (c *Constraint) Prerelease() bool { + return len(c.check.Prerelease()) > 0 +} + func (c *Constraint) String() string { return c.original } diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 116a74466d..e87df69906 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -388,3 +388,20 @@ func (v *Version) String() string { func (v *Version) Original() string { return v.original } + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} diff --git a/vendor/github.com/hashicorp/hc-install/README.md b/vendor/github.com/hashicorp/hc-install/README.md index 87c06a2034..eb287ff0fc 100644 --- a/vendor/github.com/hashicorp/hc-install/README.md +++ b/vendor/github.com/hashicorp/hc-install/README.md @@ -31,7 +31,7 @@ The `Installer` offers a few high-level methods: The `Installer` methods accept number of different `Source` types. Each comes with different trade-offs described below. - - `fs.{AnyVersion,ExactVersion}` - Finds a binary in `$PATH` (or additional paths) + - `fs.{AnyVersion,ExactVersion,Version}` - Finds a binary in `$PATH` (or additional paths) - **Pros:** - This is most convenient when you already have the product installed on your system which you already manage. 
diff --git a/vendor/github.com/hashicorp/hc-install/fs/version.go b/vendor/github.com/hashicorp/hc-install/fs/version.go new file mode 100644 index 0000000000..26633b8afc --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/version.go @@ -0,0 +1,97 @@ +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// Version finds the first executable binary of the product name +// which matches the version constraint within system $PATH and any declared ExtraPaths +// (which are *appended* to any directories in $PATH) +type Version struct { + Product product.Product + Constraints version.Constraints + ExtraPaths []string + Timeout time.Duration + + logger *log.Logger +} + +func (*Version) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (v *Version) SetLogger(logger *log.Logger) { + v.logger = logger +} + +func (v *Version) log() *log.Logger { + if v.logger == nil { + return discardLogger + } + return v.logger +} + +func (v *Version) Validate() error { + if !validators.IsBinaryNameValid(v.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", v.Product.BinaryName()) + } + if len(v.Constraints) == 0 { + return fmt.Errorf("undeclared version constraints") + } + if v.Product.GetVersion == nil { + return fmt.Errorf("undeclared version getter") + } + return nil +} + +func (v *Version) Find(ctx context.Context) (string, error) { + timeout := defaultTimeout + if v.Timeout > 0 { + timeout = v.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + execPath, err := findFile(lookupDirs(v.ExtraPaths), v.Product.BinaryName(), func(file string) error { + err := checkExecutable(file) + if err != nil { + return err + } + + ver, err := 
v.Product.GetVersion(ctx, file) + if err != nil { + return err + } + + for _, vc := range v.Constraints { + if !vc.Check(ver) { + return fmt.Errorf("version (%s) doesn't meet constraints %s", ver, vc.String()) + } + } + + return nil + }) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + + return execPath, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/product/vault.go b/vendor/github.com/hashicorp/hc-install/product/vault.go new file mode 100644 index 0000000000..d03bc4fabc --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/vault.go @@ -0,0 +1,54 @@ +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var ( + vaultVersionOutputRe = regexp.MustCompile(`Vault ` + simpleVersionRe) + v1_17 = version.Must(version.NewVersion("1.17")) +) + +var Vault = Product{ + Name: "vault", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "vault.exe" + } + return "vault" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := vaultVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/vault.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: 
&build.GoBuild{Version: v1_17}, + }, +} diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index aa6faaf2fb..9f6c23b1cd 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,16 @@ # HCL Changelog +## v2.13.0 (June 22, 2022) + +### Enhancements + +* hcl: `hcl.Diagnostic` how has an additional field `Extra` which is intended for carrying arbitrary supporting data ("extra information") related to the diagnostic message, intended to allow diagnostic renderers to optionally tailor the presentation of messages for particular situations. ([#539](https://github.com/hashicorp/hcl/pull/539)) +* hclsyntax: When an error occurs during a function call, the returned diagnostics will include _extra information_ (as described in the previous point) about which function was being called and, if the message is about an error returned by the function itself, that raw `error` value without any post-processing. ([#539](https://github.com/hashicorp/hcl/pull/539)) + +### Bugs Fixed + +* hclwrite: Fixed a potential data race for any situation where `hclwrite.Format` runs concurrently with itself. ([#534](https://github.com/hashicorp/hcl/pull/534)) + ## v2.12.0 (April 22, 2022) ### Enhancements diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go index c80535b7a7..bcf4eb39c0 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go @@ -63,6 +63,28 @@ type Diagnostic struct { // case of colliding names. Expression Expression EvalContext *EvalContext + + // Extra is an extension point for additional machine-readable information + // about this problem. 
+ // + // Recipients of diagnostic objects may type-assert this value with + // specific interface types they know about to discover if any additional + // information is available that is interesting for their use-case. + // + // Extra is always considered to be optional extra information and so a + // diagnostic message should still always be fully described (from the + // perspective of a human who understands the language the messages are + // written in) by the other fields in case a particular recipient. + // + // Functions that return diagnostics with Extra populated should typically + // document that they place values implementing a particular interface, + // rather than a concrete type, and define that interface such that its + // methods can dynamically indicate a lack of support at runtime even + // if the interface happens to be statically available. An Extra + // type that wraps other Extra values should additionally implement + // interface DiagnosticExtraUnwrapper to return the value they are wrapping + // so that callers can access inner values to type-assert against. + Extra interface{} } // Diagnostics is a list of Diagnostic instances. @@ -141,3 +163,24 @@ type DiagnosticWriter interface { WriteDiagnostic(*Diagnostic) error WriteDiagnostics(Diagnostics) error } + +// DiagnosticExtraUnwrapper is an interface implemented by values in the +// Extra field of Diagnostic when they are wrapping another "Extra" value that +// was generated downstream. +// +// Diagnostic recipients which want to examine "Extra" values to sniff for +// particular types of extra data can either type-assert this interface +// directly and repeatedly unwrap until they recieve nil, or can use the +// helper function DiagnosticExtra. +type DiagnosticExtraUnwrapper interface { + // If the reciever is wrapping another "diagnostic extra" value, returns + // that value. Otherwise returns nil to indicate dynamically that nothing + // is wrapped. 
+ // + // The "nothing is wrapped" condition can be signalled either by this + // method returning nil or by a type not implementing this interface at all. + // + // Implementers should never create unwrap "cycles" where a nested extra + // value returns a value that was also wrapping it. + UnwrapDiagnosticExtra() interface{} +} diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go new file mode 100644 index 0000000000..6994e2336d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +package hcl + +// This file contains additional diagnostics-related symbols that use the +// Go 1.18 type parameters syntax and would therefore be incompatible with +// Go 1.17 and earlier. + +// DiagnosticExtra attempts to retrieve an "extra value" of type T from the +// given diagnostic, if either the diag.Extra field directly contains a value +// of that type or the value implements DiagnosticExtraUnwrapper and directly +// or indirectly returns a value of that type. +// +// Type T should typically be an interface type, so that code which generates +// diagnostics can potentially return different implementations of the same +// interface dynamically as needed. +// +// If a value of type T is found, returns that value and true to indicate +// success. Otherwise, returns the zero value of T and false to indicate +// failure. +func DiagnosticExtra[T any](diag *Diagnostic) (T, bool) { + extra := diag.Extra + var zero T + + for { + if ret, ok := extra.(T); ok { + return ret, true + } + + if unwrap, ok := extra.(DiagnosticExtraUnwrapper); ok { + // If our "extra" implements DiagnosticExtraUnwrapper then we'll + // unwrap one level and try this again. 
+ extra = unwrap.UnwrapDiagnosticExtra() + } else { + return zero, false + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 2706998f8d..358fd5d510 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -26,7 +26,7 @@ type Expression interface { } // Assert that Expression implements hcl.Expression -var assertExprImplExpr hcl.Expression = Expression(nil) +var _ hcl.Expression = Expression(nil) // ParenthesesExpr represents an expression written in grouping // parentheses. @@ -270,6 +270,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } } + diagExtra := functionCallDiagExtra{ + calledFunctionName: e.Name, + } + params := f.Params() varParam := f.VarParam() @@ -297,6 +301,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -311,6 +316,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -342,6 +348,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -365,6 +372,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }, } } @@ -382,6 +390,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }, } } @@ -426,6 +435,7 @@ func (e *FunctionCallExpr) Value(ctx 
*hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: argExpr, EvalContext: ctx, + Extra: &diagExtra, }) } } @@ -442,6 +452,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti resultVal, err := f.Call(argVals) if err != nil { + // For errors in the underlying call itself we also return the raw + // call error via an extra method on our "diagnostic extra" value. + diagExtra.functionCallError = err + switch terr := err.(type) { case function.ArgError: i := terr.Index @@ -479,6 +493,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Subject: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }) default: // This is the most degenerate case of all, where the @@ -497,6 +512,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }) } } else { @@ -515,6 +531,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: argExpr, EvalContext: ctx, + Extra: &diagExtra, }) } @@ -530,6 +547,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }) } @@ -562,6 +580,39 @@ func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { return ret } +// FunctionCallDiagExtra is an interface implemented by the value in the "Extra" +// field of some diagnostics returned by FunctionCallExpr.Value, giving +// cooperating callers access to some machine-readable information about the +// call that a diagnostic relates to. +type FunctionCallDiagExtra interface { + // CalledFunctionName returns the name of the function being called at + // the time the diagnostic was generated, if any. Returns an empty string + // if there is no known called function. 
+ CalledFunctionName() string + + // FunctionCallError returns the error value returned by the implementation + // of the function being called, if any. Returns nil if the diagnostic was + // not returned in response to a call error. + // + // Some errors related to calling functions are generated by HCL itself + // rather than by the underlying function, in which case this method + // will return nil. + FunctionCallError() error +} + +type functionCallDiagExtra struct { + calledFunctionName string + functionCallError error +} + +func (e *functionCallDiagExtra) CalledFunctionName() string { + return e.calledFunctionName +} + +func (e *functionCallDiagExtra) FunctionCallError() error { + return e.functionCallError +} + type ConditionalExpr struct { Condition Expression TrueResult Expression diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go index 2b4ba3235e..dc7bc73ec2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go @@ -119,7 +119,7 @@ func formatSpaces(lines []formatLine) { if i < (len(line.lead) - 1) { after = line.lead[i+1] } else { - after = nilToken + continue } if spaceAfterToken(token, before, after) { after.SpacesBefore = 1 @@ -143,7 +143,7 @@ func formatSpaces(lines []formatLine) { if i < (len(line.assign) - 1) { after = line.assign[i+1] } else { - after = nilToken + continue } if spaceAfterToken(token, before, after) { after.SpacesBefore = 1 diff --git a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go index 2b19c59ea6..164c0fd6b6 100644 --- a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go +++ b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -1,6 +1,6 @@ package version -const version = "0.16.1" +const version = "0.17.2" // ModuleVersion returns the current version 
of the github.com/hashicorp/terraform-exec Go module. // This is a function to allow for future possible enhancement using debug.BuildInfo. diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go index 83abd22d55..56393a006c 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go @@ -1,9 +1,11 @@ package tfexec import ( + "bufio" "bytes" "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -17,10 +19,12 @@ import ( const ( checkpointDisableEnvVar = "CHECKPOINT_DISABLE" cliArgsEnvVar = "TF_CLI_ARGS" - logEnvVar = "TF_LOG" inputEnvVar = "TF_INPUT" automationEnvVar = "TF_IN_AUTOMATION" + logEnvVar = "TF_LOG" + logCoreEnvVar = "TF_LOG_CORE" logPathEnvVar = "TF_LOG_PATH" + logProviderEnvVar = "TF_LOG_PROVIDER" reattachEnvVar = "TF_REATTACH_PROVIDERS" appendUserAgentEnvVar = "TF_APPEND_USER_AGENT" workspaceEnvVar = "TF_WORKSPACE" @@ -35,8 +39,10 @@ var prohibitedEnvVars = []string{ cliArgsEnvVar, inputEnvVar, automationEnvVar, - logPathEnvVar, logEnvVar, + logCoreEnvVar, + logPathEnvVar, + logProviderEnvVar, reattachEnvVar, appendUserAgentEnvVar, workspaceEnvVar, @@ -146,11 +152,14 @@ func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { if tf.logPath == "" { // so logging can't pollute our stderr output env[logEnvVar] = "" + env[logCoreEnvVar] = "" env[logPathEnvVar] = "" + env[logProviderEnvVar] = "" } else { + env[logEnvVar] = tf.log + env[logCoreEnvVar] = tf.logCore env[logPathEnvVar] = tf.logPath - // Log levels other than TRACE are currently unreliable, the CLI recommends using TRACE only. 
- env[logEnvVar] = "TRACE" + env[logProviderEnvVar] = tf.logProvider } // constant automation override env vars @@ -171,7 +180,7 @@ func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { } func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string]string, args ...string) *exec.Cmd { - cmd := exec.Command(tf.execPath, args...) + cmd := exec.CommandContext(ctx, tf.execPath, args...) cmd.Env = tf.buildEnv(mergeEnv) cmd.Dir = tf.workingDir @@ -230,3 +239,36 @@ func mergeWriters(writers ...io.Writer) io.Writer { } return io.MultiWriter(compact...) } + +func writeOutput(ctx context.Context, r io.ReadCloser, w io.Writer) error { + // ReadBytes will block until bytes are read, which can cause a delay in + // returning even if the command's context has been canceled. Use a separate + // goroutine to prompt ReadBytes to return on cancel + closeCtx, closeCancel := context.WithCancel(ctx) + defer closeCancel() + go func() { + select { + case <-ctx.Done(): + r.Close() + case <-closeCtx.Done(): + return + } + }() + + buf := bufio.NewReader(r) + for { + line, err := buf.ReadBytes('\n') + if len(line) > 0 { + if _, err := w.Write(line); err != nil { + return err + } + } + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go index 08a65bcde4..6d7b768ee7 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go @@ -7,26 +7,12 @@ import ( "context" "os/exec" "strings" + "sync" ) func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { var errBuf strings.Builder - cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) - cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) - - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded || ctx.Err() == 
context.Canceled { - if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { - err := cmd.Process.Kill() - if err != nil { - tf.logger.Printf("error from kill: %s", err) - } - } - } - }() - // check for early cancellation select { case <-ctx.Done(): @@ -34,7 +20,52 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { default: } - err := cmd.Run() + // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and + // cmd.Stderr because it can cause hanging when killing the command + // https://github.com/golang/go/issues/23019 + stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout) + stderrWriter := mergeWriters(tf.stderr, &errBuf) + + cmd.Stderr = nil + cmd.Stdout = nil + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return err + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return err + } + + err = cmd.Start() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, "") + } + + var errStdout, errStderr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter) + }() + + wg.Add(1) + go func() { + defer wg.Done() + errStderr = writeOutput(ctx, stderrPipe, stderrWriter) + }() + + // Reads from pipes must be completed before calling cmd.Wait(). 
Otherwise + // can cause a race condition + wg.Wait() + + err = cmd.Wait() if err == nil && ctx.Err() != nil { err = ctx.Err() } @@ -42,5 +73,13 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { return tf.wrapExitError(ctx, err, errBuf.String()) } + // Return error if there was an issue reading the std out/err + if errStdout != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStdout, errBuf.String()) + } + if errStderr != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStderr, errBuf.String()) + } + return nil } diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go index 7cbdcb96f1..6fa40e0aa3 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -4,15 +4,13 @@ import ( "context" "os/exec" "strings" + "sync" "syscall" ) func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { var errBuf strings.Builder - cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) - cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) - cmd.SysProcAttr = &syscall.SysProcAttr{ // kill children if parent is dead Pdeathsig: syscall.SIGKILL, @@ -20,21 +18,6 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { Setpgid: true, } - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { - if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { - // send SIGINT to process group - err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) - if err != nil { - tf.logger.Printf("error from SIGINT: %s", err) - } - } - - // TODO: send a kill if it doesn't respond for a bit? 
- } - }() - // check for early cancellation select { case <-ctx.Done(): @@ -42,7 +25,52 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { default: } - err := cmd.Run() + // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and + // cmd.Stderr because it can cause hanging when killing the command + // https://github.com/golang/go/issues/23019 + stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout) + stderrWriter := mergeWriters(tf.stderr, &errBuf) + + cmd.Stderr = nil + cmd.Stdout = nil + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return err + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return err + } + + err = cmd.Start() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, "") + } + + var errStdout, errStderr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter) + }() + + wg.Add(1) + go func() { + defer wg.Done() + errStderr = writeOutput(ctx, stderrPipe, stderrWriter) + }() + + // Reads from pipes must be completed before calling cmd.Wait(). 
Otherwise + // can cause a race condition + wg.Wait() + + err = cmd.Wait() if err == nil && ctx.Err() != nil { err = ctx.Err() } @@ -50,5 +78,13 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { return tf.wrapExitError(ctx, err, errBuf.String()) } + // Return error if there was an issue reading the std out/err + if errStdout != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStdout, errBuf.String()) + } + if errStderr != nil && ctx.Err() != nil { + return tf.wrapExitError(ctx, errStderr, errBuf.String()) + } + return nil } diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go new file mode 100644 index 0000000000..11b6b9c77f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go @@ -0,0 +1,55 @@ +package tfexec + +import ( + "bytes" + "context" + "os/exec" +) + +type statePullConfig struct { + reattachInfo ReattachInfo +} + +var defaultStatePullConfig = statePullConfig{} + +type StatePullOption interface { + configureShow(*statePullConfig) +} + +func (opt *ReattachOption) configureStatePull(conf *statePullConfig) { + conf.reattachInfo = opt.info +} + +func (tf *Terraform) StatePull(ctx context.Context, opts ...StatePullOption) (string, error) { + c := defaultStatePullConfig + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return "", err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + cmd := tf.statePullCmd(ctx, mergeEnv) + + var ret bytes.Buffer + cmd.Stdout = &ret + err := tf.runTerraformCmd(ctx, cmd) + if err != nil { + return "", err + } + + return ret.String(), nil +} + +func (tf *Terraform) statePullCmd(ctx context.Context, mergeEnv map[string]string) *exec.Cmd { + args := []string{"state", "pull"} + + return tf.buildTerraformCmd(ctx, mergeEnv, args...) 
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go new file mode 100644 index 0000000000..14e55a2eb3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go @@ -0,0 +1,67 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type statePushConfig struct { + force bool + lock bool + lockTimeout string +} + +var defaultStatePushOptions = statePushConfig{ + lock: false, + lockTimeout: "0s", +} + +// StatePushCmdOption represents options used in the Refresh method. +type StatePushCmdOption interface { + configureStatePush(*statePushConfig) +} + +func (opt *ForceOption) configureStatePush(conf *statePushConfig) { + conf.force = opt.force +} + +func (opt *LockOption) configureStatePush(conf *statePushConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStatePush(conf *statePushConfig) { + conf.lockTimeout = opt.timeout +} + +func (tf *Terraform) StatePush(ctx context.Context, path string, opts ...StatePushCmdOption) error { + cmd, err := tf.statePushCmd(ctx, path, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) statePushCmd(ctx context.Context, path string, opts ...StatePushCmdOption) (*exec.Cmd, error) { + c := defaultStatePushOptions + + for _, o := range opts { + o.configureStatePush(&c) + } + + args := []string{"state", "push"} + + if c.force { + args = append(args, "-force") + } + + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + + args = append(args, path) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go index 2ad143a41a..bb8be17d58 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go @@ -48,11 +48,22 @@ type Terraform struct { skipProviderVerify bool env map[string]string - stdout io.Writer - stderr io.Writer - logger printfer + stdout io.Writer + stderr io.Writer + logger printfer + + // TF_LOG environment variable, defaults to TRACE if logPath is set. + log string + + // TF_LOG_CORE environment variable + logCore string + + // TF_LOG_PATH environment variable logPath string + // TF_LOG_PROVIDER environment variable + logProvider string + versionLock sync.Mutex execVersion *version.Version provVersions map[string]*version.Version @@ -122,10 +133,58 @@ func (tf *Terraform) SetStderr(w io.Writer) { tf.stderr = w } +// SetLog sets the TF_LOG environment variable for Terraform CLI execution. +// This must be combined with a call to SetLogPath to take effect. +// +// This is only compatible with Terraform CLI 0.15.0 or later as setting the +// log level was unreliable in earlier versions. 
It will default to TRACE when +// SetLogPath is called on versions 0.14.11 and earlier, or if SetLogCore and +// SetLogProvider have not been called before SetLogPath on versions 0.15.0 and +// later. +func (tf *Terraform) SetLog(log string) error { + err := tf.compatible(context.Background(), tf0_15_0, nil) + if err != nil { + return err + } + tf.log = log + return nil +} + +// SetLogCore sets the TF_LOG_CORE environment variable for Terraform CLI +// execution. This must be combined with a call to SetLogPath to take effect. +// +// This is only compatible with Terraform CLI 0.15.0 or later. +func (tf *Terraform) SetLogCore(logCore string) error { + err := tf.compatible(context.Background(), tf0_15_0, nil) + if err != nil { + return err + } + tf.logCore = logCore + return nil +} + // SetLogPath sets the TF_LOG_PATH environment variable for Terraform CLI // execution. func (tf *Terraform) SetLogPath(path string) error { tf.logPath = path + // Prevent setting the log path without enabling logging + if tf.log == "" && tf.logCore == "" && tf.logProvider == "" { + tf.log = "TRACE" + } + return nil +} + +// SetLogProvider sets the TF_LOG_PROVIDER environment variable for Terraform +// CLI execution. This must be combined with a call to SetLogPath to take +// effect. +// +// This is only compatible with Terraform CLI 0.15.0 or later. +func (tf *Terraform) SetLogProvider(logProvider string) error { + err := tf.compatible(context.Background(), tf0_15_0, nil) + if err != nil { + return err + } + tf.logProvider = logProvider return nil } diff --git a/vendor/github.com/hashicorp/terraform-json/config.go b/vendor/github.com/hashicorp/terraform-json/config.go index e093cfa8bf..5ebe4bc840 100644 --- a/vendor/github.com/hashicorp/terraform-json/config.go +++ b/vendor/github.com/hashicorp/terraform-json/config.go @@ -48,6 +48,9 @@ type ProviderConfig struct { // The name of the provider, ie: "aws". 
Name string `json:"name,omitempty"` + // The fully-specified name of the provider, ie: "registry.terraform.io/hashicorp/aws". + FullName string `json:"full_name,omitempty"` + // The alias of the provider, ie: "us-east-1". Alias string `json:"alias,omitempty"` diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go index 1de5bc82a0..274006a018 100644 --- a/vendor/github.com/hashicorp/terraform-json/plan.go +++ b/vendor/github.com/hashicorp/terraform-json/plan.go @@ -55,6 +55,19 @@ type Plan struct { // The Terraform configuration used to make the plan. Config *Config `json:"configuration,omitempty"` + + // RelevantAttributes represents any resource instances and their + // attributes which may have contributed to the planned changes + RelevantAttributes []ResourceAttribute `json:"relevant_attributes,omitempty"` +} + +// ResourceAttribute describes a full path to a resource attribute +type ResourceAttribute struct { + // Resource describes resource instance address (e.g. null_resource.foo) + Resource string `json:"resource"` + // Attribute describes the attribute path using a lossy representation + // of cty.Path. (e.g. ["id"] or ["objects", 0, "val"]). + Attribute []json.RawMessage `json:"attribute"` } // Validate checks to ensure that the plan is present, and the diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go index 2360231d00..027224b620 100644 --- a/vendor/github.com/hashicorp/terraform-json/schemas.go +++ b/vendor/github.com/hashicorp/terraform-json/schemas.go @@ -223,6 +223,13 @@ type SchemaAttribute struct { Sensitive bool `json:"sensitive,omitempty"` } +// jsonSchemaAttribute describes an attribute within a schema block +// in a middle-step internal representation before marshalled into +// a more useful SchemaAttribute with cty.Type. 
+// This avoids a panic on marshalling cty.NilType (from cty upstream)
+type jsonStateOutput struct { + Sensitive bool `json:"sensitive"` + Value interface{} `json:"value,omitempty"` + Type json.RawMessage `json:"type,omitempty"` +} + +func (so *StateOutput) MarshalJSON() ([]byte, error) { + jsonSa := &jsonStateOutput{ + Sensitive: so.Sensitive, + Value: so.Value, + } + if so.Type != cty.NilType { + outputType, _ := so.Type.MarshalJSON() + jsonSa.Type = outputType + } + return json.Marshal(jsonSa) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go index a35258a8ef..38ef4e35e2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go @@ -22,12 +22,19 @@ func InitContext(ctx context.Context, sdkOpts tfsdklog.Options, providerOpts tfl ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ tfsdklog.WithLevelFromEnv(EnvTfLogSdk), }, sdkOpts...)...) + ctx = ProtoSubsystemContext(ctx, sdkOpts) + ctx = tfsdklog.NewRootProviderLogger(ctx, providerOpts...) + + return ctx +} + +// ProtoSubsystemContext adds the proto subsystem to the SDK logger context. +func ProtoSubsystemContext(ctx context.Context, sdkOpts tfsdklog.Options) context.Context { ctx = tfsdklog.NewSubsystem(ctx, SubsystemProto, append(tfsdklog.Options{ // All calls are through the Protocol* helper functions tfsdklog.WithAdditionalLocationOffset(1), tfsdklog.WithLevelFromEnv(EnvTfLogSdkProto), }, sdkOpts...)...) - ctx = tfsdklog.NewRootProviderLogger(ctx, providerOpts...) 
return ctx } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go index 3d7af9611e..ce803d752b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go @@ -5,9 +5,30 @@ package logging // Practitioners or tooling reading logs may be depending on these keys, so be // conscious of that when changing them. const ( + // Attribute of the diagnostic being logged. + KeyDiagnosticAttribute = "diagnostic_attribute" + + // Number of the error diagnostics. + KeyDiagnosticErrorCount = "diagnostic_error_count" + + // Severity of the diagnostic being logged. + KeyDiagnosticSeverity = "diagnostic_severity" + + // Detail of the diagnostic being logged. + KeyDiagnosticDetail = "diagnostic_detail" + + // Summary of the diagnostic being logged. + KeyDiagnosticSummary = "diagnostic_summary" + + // Number of the warning diagnostics. + KeyDiagnosticWarningCount = "diagnostic_warning_count" + // Underlying error string KeyError = "error" + // Duration in milliseconds for the RPC request + KeyRequestDurationMs = "tf_req_duration_ms" + // A unique ID for the RPC request KeyRequestID = "tf_req_id" diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go index 9f9c2808c6..21a392a3ca 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go @@ -16,6 +16,11 @@ func ProtocolError(ctx context.Context, msg string, additionalFields ...map[stri tfsdklog.SubsystemError(ctx, SubsystemProto, msg, additionalFields...) } +// ProtocolWarn emits a protocol subsystem log at WARN level. 
+func ProtocolWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemProto, msg, additionalFields...) +} + // ProtocolTrace emits a protocol subsystem log at TRACE level. func ProtocolTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { tfsdklog.SubsystemTrace(ctx, SubsystemProto, msg, additionalFields...) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go index 6c68504c1f..b40763c6e1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go @@ -8,8 +8,7 @@ import ( ) func ProviderLoggerName(providerAddress string) string { - provider, err := tfaddr.ParseRawProviderSourceString(providerAddress) - + provider, err := tfaddr.ParseProviderSource(providerAddress) if err != nil { log.Printf("[ERROR] Error parsing provider name %q: %s", providerAddress, err) return "" diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go new file mode 100644 index 0000000000..1032f7d4fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go @@ -0,0 +1,82 @@ +package diag + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Diagnostics is a collection of Diagnostic. +type Diagnostics []*tfprotov5.Diagnostic + +// ErrorCount returns the number of error severity diagnostics. 
+func (d Diagnostics) ErrorCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov5.DiagnosticSeverityError { + continue + } + + result++ + } + + return result +} + +// Log will log every diagnostic: +// +// - Error severity at ERROR level +// - Warning severity at WARN level +// - Invalid/Unknown severity at WARN level +// +func (d Diagnostics) Log(ctx context.Context) { + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + diagnosticFields := map[string]interface{}{ + logging.KeyDiagnosticDetail: diagnostic.Detail, + logging.KeyDiagnosticSeverity: diagnostic.Severity.String(), + logging.KeyDiagnosticSummary: diagnostic.Summary, + } + + if diagnostic.Attribute != nil { + diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() + } + + switch diagnostic.Severity { + case tfprotov5.DiagnosticSeverityError: + logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) + case tfprotov5.DiagnosticSeverityWarning: + logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields) + default: + logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields) + } + } +} + +// WarningCount returns the number of warning severity diagnostics. +func (d Diagnostics) WarningCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov5.DiagnosticSeverityWarning { + continue + } + + result++ + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go new file mode 100644 index 0000000000..0c73dab12f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go @@ -0,0 +1,3 @@ +// Package diag contains diagnostics helpers. 
These implementations are +// intentionally outside the public API. +package diag diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go new file mode 100644 index 0000000000..cc72fe4bbf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go @@ -0,0 +1,8 @@ +package tf5serverlogging + +// Context key types. +// Reference: https://staticcheck.io/docs/checks/#SA1029 + +// ContextKeyDownstreamRequestStartTime is a context.Context key to store the +// time.Time when the server began a downstream request. +type ContextKeyDownstreamRequestStartTime struct{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go new file mode 100644 index 0000000000..e77a831c03 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go @@ -0,0 +1,3 @@ +// Package tf5serverlogging contains logging functionality specific to +// tf5server and tfprotov5 types. 
+package tf5serverlogging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go new file mode 100644 index 0000000000..eeec76e8c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go @@ -0,0 +1,40 @@ +package tf5serverlogging + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag" +) + +// DownstreamRequest sets a request duration start time context key and +// generates a TRACE "Sending request downstream" log. +func DownstreamRequest(ctx context.Context) context.Context { + requestStart := time.Now() + ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart) + + logging.ProtocolTrace(ctx, "Sending request downstream") + + return ctx +} + +// DownstreamResponse generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// diagnostic severity counts +// - Per-diagnostic logs +// +func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) { + responseFields := map[string]interface{}{ + logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(), + logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + diagnostics.Log(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go index 5f95499654..b86a045e95 100644 --- 
a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go @@ -128,6 +128,9 @@ type ReadResourceRequest struct { // Private is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the ReadResourceResponse type Private field. Private []byte // ProviderMeta supplies the provider metadata configuration for the @@ -215,6 +218,9 @@ type PlanResourceChangeRequest struct { // PriorPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the PlanResourceChangeResponse type PlannedPrivate field. PriorPrivate []byte // ProviderMeta supplies the provider metadata configuration for the @@ -283,6 +289,10 @@ type PlanResourceChangeResponse struct { // like sent with requests for this resource. This state will be // associated with the resource, but will not be considered when // calculating diffs. + // + // This private state data will be sent in the ApplyResourceChange RPC, in + // relation to the types of this package, the ApplyResourceChangeRequest + // type PlannedPrivate field. PlannedPrivate []byte // Diagnostics report errors or warnings related to determining the @@ -344,6 +354,13 @@ type ApplyResourceChangeRequest struct { // PlannedPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. 
+ // + // This private state data is sourced from the PlanResourceChange RPC, in + // relation to the types in this package, the PlanResourceChangeResponse + // type PlannedPrivate field. + // + // To ensure private state data is preserved, copy any necessary data to + // the ApplyResourceChangeResponse type Private field. PlannedPrivate []byte // ProviderMeta supplies the provider metadata configuration for the diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go index 9ec7abefb1..3b48ae6c98 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/internal/logging" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto" "google.golang.org/grpc" @@ -494,13 +495,13 @@ func (s *server) GetSchema(ctx context.Context, req *tfplugin5.GetProviderSchema logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.GetProviderSchema(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.GetProviderSchema_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting 
response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -522,13 +523,13 @@ func (s *server) PrepareProviderConfig(ctx context.Context, req *tfplugin5.Prepa return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.PrepareProviderConfig(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PreparedConfig", resp.PreparedConfig) ret, err := toproto.PrepareProviderConfig_Response(resp) if err != nil { @@ -551,13 +552,13 @@ func (s *server) Configure(ctx context.Context, req *tfplugin5.Configure_Request return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ConfigureProvider(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.Configure_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -591,13 +592,13 @@ func (s *server) Stop(ctx context.Context, req *tfplugin5.Stop_Request) (*tfplug logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.StopProvider(ctx, 
r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, nil) logging.ProtocolTrace(ctx, "Closing all our contexts") s.stop() logging.ProtocolTrace(ctx, "Closed all our contexts") @@ -623,13 +624,13 @@ func (s *server) ValidateDataSourceConfig(ctx context.Context, req *tfplugin5.Va return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateDataSourceConfig(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateDataSourceConfig_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -653,13 +654,13 @@ func (s *server) ReadDataSource(ctx context.Context, req *tfplugin5.ReadDataSour } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ReadDataSource(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) ret, err := toproto.ReadDataSource_Response(resp) if err != nil { @@ -683,13 +684,13 @@ func (s 
*server) ValidateResourceTypeConfig(ctx context.Context, req *tfplugin5. return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateResourceTypeConfig(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateResourceTypeConfig_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -711,13 +712,13 @@ func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin5.Upgrad logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.UpgradeResourceState(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) ret, err := toproto.UpgradeResourceState_Response(resp) if err != nil { @@ -742,13 +743,13 @@ func (s *server) ReadResource(ctx context.Context, req *tfplugin5.ReadResource_R } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", r.CurrentState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := 
s.downstream.ReadResource(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ReadResource_Response(resp) if err != nil { @@ -775,13 +776,13 @@ func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin5.PlanReso logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", r.PriorState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", r.ProposedNewState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.PlanResourceChange(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) ret, err := toproto.PlanResourceChange_Response(resp) if err != nil { @@ -808,13 +809,13 @@ func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin5.ApplyRe logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", r.PlannedState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ApplyResourceChange(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: 
err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ApplyResourceChange_Response(resp) if err != nil { @@ -837,13 +838,13 @@ func (s *server) ImportResourceState(ctx context.Context, req *tfplugin5.ImportR logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf5serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ImportResourceState(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) for _, importedResource := range resp.ImportedResources { logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go new file mode 100644 index 0000000000..543a36232f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go @@ -0,0 +1,82 @@ +package diag + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Diagnostics is a collection of Diagnostic. +type Diagnostics []*tfprotov6.Diagnostic + +// ErrorCount returns the number of error severity diagnostics. 
+func (d Diagnostics) ErrorCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov6.DiagnosticSeverityError { + continue + } + + result++ + } + + return result +} + +// Log will log every diagnostic: +// +// - Error severity at ERROR level +// - Warning severity at WARN level +// - Invalid/Unknown severity at WARN level +// +func (d Diagnostics) Log(ctx context.Context) { + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + diagnosticFields := map[string]interface{}{ + logging.KeyDiagnosticDetail: diagnostic.Detail, + logging.KeyDiagnosticSeverity: diagnostic.Severity.String(), + logging.KeyDiagnosticSummary: diagnostic.Summary, + } + + if diagnostic.Attribute != nil { + diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() + } + + switch diagnostic.Severity { + case tfprotov6.DiagnosticSeverityError: + logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) + case tfprotov6.DiagnosticSeverityWarning: + logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields) + default: + logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields) + } + } +} + +// WarningCount returns the number of warning severity diagnostics. +func (d Diagnostics) WarningCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov6.DiagnosticSeverityWarning { + continue + } + + result++ + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go new file mode 100644 index 0000000000..0c73dab12f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go @@ -0,0 +1,3 @@ +// Package diag contains diagnostics helpers. 
These implementations are +// intentionally outside the public API. +package diag diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go new file mode 100644 index 0000000000..15386cd2cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go @@ -0,0 +1,8 @@ +package tf6serverlogging + +// Context key types. +// Reference: https://staticcheck.io/docs/checks/#SA1029 + +// ContextKeyDownstreamRequestStartTime is a context.Context key to store the +// time.Time when the server began a downstream request. +type ContextKeyDownstreamRequestStartTime struct{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go new file mode 100644 index 0000000000..167a61825a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go @@ -0,0 +1,3 @@ +// Package tf5serverlogging contains logging functionality specific to +// tf5server and tfprotov5 types. 
+package tf6serverlogging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go new file mode 100644 index 0000000000..c47df9b457 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go @@ -0,0 +1,40 @@ +package tf6serverlogging + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag" +) + +// DownstreamRequest sets a request duration start time context key and +// generates a TRACE "Sending request downstream" log. +func DownstreamRequest(ctx context.Context) context.Context { + requestStart := time.Now() + ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart) + + logging.ProtocolTrace(ctx, "Sending request downstream") + + return ctx +} + +// DownstreamResponse generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// diagnostic severity counts +// - Per-diagnostic logs +// +func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) { + responseFields := map[string]interface{}{ + logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(), + logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + diagnostics.Log(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go index 3250f19548..2768bb526e 100644 --- 
a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go @@ -125,6 +125,9 @@ type ReadResourceRequest struct { // Private is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the ReadResourceResponse type Private field. Private []byte // ProviderMeta supplies the provider metadata configuration for the @@ -212,6 +215,9 @@ type PlanResourceChangeRequest struct { // PriorPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the PlanResourceChangeResponse type PlannedPrivate field. PriorPrivate []byte // ProviderMeta supplies the provider metadata configuration for the @@ -280,6 +286,10 @@ type PlanResourceChangeResponse struct { // like sent with requests for this resource. This state will be // associated with the resource, but will not be considered when // calculating diffs. + // + // This private state data will be sent in the ApplyResourceChange RPC, in + // relation to the types of this package, the ApplyResourceChangeRequest + // type PlannedPrivate field. PlannedPrivate []byte // Diagnostics report errors or warnings related to determining the @@ -341,6 +351,13 @@ type ApplyResourceChangeRequest struct { // PlannedPrivate is any provider-defined private state stored with the // resource. It is used for keeping state with the resource that is not // meant to be included when calculating diffs. 
+ // + // This private state data is sourced from the PlanResourceChange RPC, in + // relation to the types in this package, the PlanResourceChangeResponse + // type PlannedPrivate field. + // + // To ensure private state data is preserved, copy any necessary data to + // the ApplyResourceChangeResponse type Private field. PlannedPrivate []byte // ProviderMeta supplies the provider metadata configuration for the diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go index 4ed9ece604..d4369e21fc 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/internal/logging" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto" "google.golang.org/grpc" @@ -494,13 +495,13 @@ func (s *server) GetProviderSchema(ctx context.Context, req *tfplugin6.GetProvid logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.GetProviderSchema(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.GetProviderSchema_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting 
response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -522,13 +523,13 @@ func (s *server) ConfigureProvider(ctx context.Context, req *tfplugin6.Configure return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ConfigureProvider(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.Configure_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -549,13 +550,13 @@ func (s *server) ValidateProviderConfig(ctx context.Context, req *tfplugin6.Vali return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateProviderConfig(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateProviderConfig_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -589,13 +590,13 @@ func (s *server) Stop(ctx context.Context, req *tfplugin6.StopProvider_Request) logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := 
s.downstream.StopProvider(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, nil) logging.ProtocolTrace(ctx, "Closing all our contexts") s.stop() logging.ProtocolTrace(ctx, "Closed all our contexts") @@ -621,13 +622,13 @@ func (s *server) ValidateDataResourceConfig(ctx context.Context, req *tfplugin6. return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateDataResourceConfig(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateDataResourceConfig_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -651,13 +652,13 @@ func (s *server) ReadDataSource(ctx context.Context, req *tfplugin6.ReadDataSour } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ReadDataSource(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) ret, err := toproto.ReadDataSource_Response(resp) if err != nil 
{ @@ -681,13 +682,13 @@ func (s *server) ValidateResourceConfig(ctx context.Context, req *tfplugin6.Vali return nil, err } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ValidateResourceConfig(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) ret, err := toproto.ValidateResourceConfig_Response(resp) if err != nil { logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]interface{}{logging.KeyError: err}) @@ -709,13 +710,13 @@ func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin6.Upgrad logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.UpgradeResourceState(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) ret, err := toproto.UpgradeResourceState_Response(resp) if err != nil { @@ -740,13 +741,13 @@ func (s *server) ReadResource(ctx context.Context, req *tfplugin6.ReadResource_R } logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", r.CurrentState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err 
:= s.downstream.ReadResource(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ReadResource_Response(resp) if err != nil { @@ -773,13 +774,13 @@ func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin6.PlanReso logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", r.PriorState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", r.ProposedNewState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", r.ProviderMeta) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.PlanResourceChange(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) ret, err := toproto.PlanResourceChange_Response(resp) if err != nil { @@ -806,13 +807,13 @@ func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin6.ApplyRe logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", r.PlannedState) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", r.Config) - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ApplyResourceChange(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", 
map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) ret, err := toproto.ApplyResourceChange_Response(resp) if err != nil { @@ -835,13 +836,13 @@ func (s *server) ImportResourceState(ctx context.Context, req *tfplugin6.ImportR logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Calling downstream") + ctx = tf6serverlogging.DownstreamRequest(ctx) resp, err := s.downstream.ImportResourceState(ctx, r) if err != nil { logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) return nil, err } - logging.ProtocolTrace(ctx, "Called downstream") + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) for _, importedResource := range resp.ImportedResources { logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go index c2c26feb5e..4d7b749894 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go @@ -440,7 +440,7 @@ func (val Value) As(dst interface{}) error { if !ok { return fmt.Errorf("can't unmarshal %s into %T, expected *big.Float", val.Type(), dst) } - target.Set(v) + target.Copy(v) return nil case **big.Float: if val.IsNull() { diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go index 40857da754..c4047aeab2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go +++ 
b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go @@ -74,7 +74,7 @@ func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Valu if err != nil { return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetInt64(rv)), nil } switch peek { case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: @@ -82,13 +82,13 @@ func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Valu if err != nil { return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetInt64(rv)), nil case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: rv, err := dec.DecodeUint64() if err != nil { return Value{}, path.NewErrorf("couldn't decode number as uint64: %w", err) } - return NewValue(Number, big.NewFloat(float64(rv))), nil + return NewValue(Number, new(big.Float).SetUint64(rv)), nil case msgpackCodes.Float, msgpackCodes.Double: rv, err := dec.DecodeFloat64() if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go index d9bc172ea9..9e52348d69 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go @@ -19,17 +19,108 @@ import ( testing "github.com/mitchellh/go-testing-interface" ) +// protov5ProviderFactory is a function which is called to start a protocol +// version 5 provider server. +type protov5ProviderFactory func() (tfprotov5.ProviderServer, error) + +// protov5ProviderFactories is a mapping of provider addresses to provider +// factory for protocol version 5 provider servers. 
+type protov5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + +// merge combines provider factories. +// +// In case of an overlapping entry, the later entry will overwrite the previous +// value. +func (pf protov5ProviderFactories) merge(otherPfs ...protov5ProviderFactories) protov5ProviderFactories { + result := make(protov5ProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + +// protov6ProviderFactory is a function which is called to start a protocol +// version 6 provider server. +type protov6ProviderFactory func() (tfprotov6.ProviderServer, error) + +// protov6ProviderFactories is a mapping of provider addresses to provider +// factory for protocol version 6 provider servers. +type protov6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + +// merge combines provider factories. +// +// In case of an overlapping entry, the later entry will overwrite the previous +// value. +func (pf protov6ProviderFactories) merge(otherPfs ...protov6ProviderFactories) protov6ProviderFactories { + result := make(protov6ProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + +// sdkProviderFactory is a function which is called to start a SDK provider +// server. +type sdkProviderFactory func() (*schema.Provider, error) + +// protov6ProviderFactories is a mapping of provider addresses to provider +// factory for protocol version 6 provider servers. +type sdkProviderFactories map[string]func() (*schema.Provider, error) + +// merge combines provider factories. +// +// In case of an overlapping entry, the later entry will overwrite the previous +// value. 
+func (pf sdkProviderFactories) merge(otherPfs ...sdkProviderFactories) sdkProviderFactories { + result := make(sdkProviderFactories) + + for name, providerFactory := range pf { + result[name] = providerFactory + } + + for _, otherPf := range otherPfs { + for name, providerFactory := range otherPf { + result[name] = providerFactory + } + } + + return result +} + type providerFactories struct { - legacy map[string]func() (*schema.Provider, error) - protov5 map[string]func() (tfprotov5.ProviderServer, error) - protov6 map[string]func() (tfprotov6.ProviderServer, error) + legacy sdkProviderFactories + protov5 protov5ProviderFactories + protov6 protov6ProviderFactories } -func runProviderCommand(ctx context.Context, t testing.T, f func() error, wd *plugintest.WorkingDir, factories providerFactories) error { +func runProviderCommand(ctx context.Context, t testing.T, f func() error, wd *plugintest.WorkingDir, factories *providerFactories) error { // don't point to this as a test failure location // point to whatever called it t.Helper() + // This should not happen, but prevent panics just in case. + if factories == nil { + err := fmt.Errorf("Provider factories are missing to run Terraform command. Please report this bug in the testing framework.") + logging.HelperResourceError(ctx, err.Error()) + return err + } + // Run the providers in the same process as the test runner using the // reattach behavior in Terraform. This ensures we get test coverage // and enables the use of delve as a debugger. 
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go new file mode 100644 index 0000000000..c09e4657cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go @@ -0,0 +1,56 @@ +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func (c TestCase) providerConfig(_ context.Context) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + // [BF] The Providers field handling predates the logic being moved to this + // method. It's not entirely clear to me at this time why this field + // is being used and not the others, but leaving it here just in case + // it does have a special purpose that wasn't being unit tested prior. 
+ for name := range c.Providers { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + } + + for name, externalProvider := range c.ExternalProviders { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go new file mode 100644 index 0000000000..39e5da46c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go @@ -0,0 +1,85 @@ +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// hasProviders returns true if the TestCase has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, +// ProviderFactories, or Providers fields. 
+func (c TestCase) hasProviders(_ context.Context) bool { + if len(c.ExternalProviders) > 0 { + return true + } + + if len(c.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(c.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(c.ProviderFactories) > 0 { + return true + } + + if len(c.Providers) > 0 { + return true + } + + return false +} + +// validate ensures the TestCase is valid based on the following criteria: +// +// - No overlapping ExternalProviders and Providers entries +// - No overlapping ExternalProviders and ProviderFactories entries +// - TestStep validations performed by the (TestStep).validate() method. +// +func (c TestCase) validate(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Validating TestCase") + + if len(c.Steps) == 0 { + err := fmt.Errorf("TestCase missing Steps") + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range c.ExternalProviders { + if _, ok := c.Providers[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and Providers", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if _, ok := c.ProviderFactories[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + testCaseHasProviders := c.hasProviders(ctx) + + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // Use 1-based index for humans + stepValidateReq := testStepValidateRequest{ + StepNumber: stepNumber, + TestCaseHasProviders: testCaseHasProviders, + } + + err := step.validate(ctx, stepValidateReq) + + if err != nil { + err := fmt.Errorf("TestStep %d/%d validation error: %w", stepNumber, len(c.Steps), err) + 
logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go index 45180d2c77..8cb4dbaa66 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -237,6 +237,7 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe depSweeper, ok := sweepers[dep] if !ok { + log.Printf("[ERROR] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) return fmt.Errorf("sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) } @@ -318,6 +319,11 @@ type TestCase struct { // ProviderFactories can be specified for the providers that are valid. // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. + // // These are the providers that can be referenced within the test. Each key // is an individually addressable provider. Typically you will only pass a // single value here for the provider you are testing. Aliases are not @@ -339,6 +345,11 @@ type TestCase struct { // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, // but for protocol v5 providers defined using the terraform-plugin-go // ProviderServer interface. 
+ // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) // ProtoV6ProviderFactories serves the same purpose as ProviderFactories, @@ -346,6 +357,11 @@ type TestCase struct { // ProviderServer interface. // The version of Terraform used in acceptance testing must be greater // than or equal to v0.15.4 to use ProtoV6ProviderFactories. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) // Providers is the ResourceProvider that will be under test. @@ -354,11 +370,18 @@ type TestCase struct { Providers map[string]*schema.Provider // ExternalProviders are providers the TestCase relies on that should - // be downloaded from the registry during init. This is only really - // necessary to set if you're using import, as providers in your config - // will be automatically retrieved during init. Import doesn't use a - // config, however, so we allow manually specifying them here to be - // downloaded for import tests. + // be downloaded from the registry during init. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. 
+ // + // This is generally unnecessary to set at the TestCase level, however + // it has existing in the testing framework prior to the introduction of + // TestStep level specification and was only necessary for performing + // import testing where the configuration contained a provider outside the + // one under test. ExternalProviders map[string]ExternalProvider // PreventPostDestroyRefresh can be set to true for cases where data sources @@ -540,6 +563,74 @@ type TestStep struct { // fields that can't be refreshed and don't matter. ImportStateVerify bool ImportStateVerifyIgnore []string + + // ProviderFactories can be specified for the providers that are valid for + // this TestStep. When providers are specified at the TestStep level, all + // TestStep within a TestCase must declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... + // } + ProviderFactories map[string]func() (*schema.Provider, error) + + // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v5 providers defined using the terraform-plugin-go + // ProviderServer interface. 
When providers are specified at the TestStep + // level, all TestStep within a TestCase must declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + + // ProtoV6ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v6 providers defined using the terraform-plugin-go + // ProviderServer interface. + // The version of Terraform used in acceptance testing must be greater + // than or equal to v0.15.4 to use ProtoV6ProviderFactories. When providers + // are specified at the TestStep level, all TestStep within a TestCase must + // declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + + // ExternalProviders are providers the TestStep relies on that should + // be downloaded from the registry during init. When providers are + // specified at the TestStep level, all TestStep within a TestCase must + // declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. 
+ // + // Outside specifying an earlier version of the provider under test, + // typically for state upgrader testing, this is generally only necessary + // for performing import testing where the prior TestStep configuration + // contained a provider outside the one under test. + ExternalProviders map[string]ExternalProvider } // ParallelTest performs an acceptance test on a resource, allowing concurrency @@ -593,6 +684,16 @@ func Test(t testing.T, c TestCase) { ctx := context.Background() ctx = logging.InitTestContext(ctx, t) + err := c.validate(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Test validation error", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Test validation error: %s", err) + } + // We only run acceptance tests if an env var is set because they're // slow and generally require some outside configuration. You can opt out // of this with OverrideEnvVar on individual TestCases. @@ -608,9 +709,6 @@ func Test(t testing.T, c TestCase) { c.ProviderFactories = map[string]func() (*schema.Provider, error){} for name, p := range c.Providers { - if _, ok := c.ProviderFactories[name]; ok { - t.Fatalf("ProviderFactory for %q already exists, cannot overwrite with Provider", name) - } prov := p c.ProviderFactories[name] = func() (*schema.Provider, error) { //nolint:unparam // required signature return prov, nil @@ -648,43 +746,6 @@ func Test(t testing.T, c TestCase) { logging.HelperResourceDebug(ctx, "Finished TestCase") } -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. 
-func testProviderConfig(c TestCase) (string, error) { - var lines []string - var requiredProviders []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - for p, v := range c.ExternalProviders { - if _, ok := c.Providers[p]; ok { - return "", fmt.Errorf("Provider %q set in both Providers and ExternalProviders for TestCase. Must be set in only one.", p) - } - if _, ok := c.ProviderFactories[p]; ok { - return "", fmt.Errorf("Provider %q set in both ProviderFactories and ExternalProviders for TestCase. Must be set in only one.", p) - } - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - var providerBlock string - if v.VersionConstraint != "" { - providerBlock = fmt.Sprintf("%s\nversion = %q", providerBlock, v.VersionConstraint) - } - if v.Source != "" { - providerBlock = fmt.Sprintf("%s\nsource = %q", providerBlock, v.Source) - } - if providerBlock != "" { - providerBlock = fmt.Sprintf("%s = {%s\n}\n", p, providerBlock) - } - requiredProviders = append(requiredProviders, providerBlock) - } - - if len(requiredProviders) > 0 { - lines = append([]string{fmt.Sprintf("terraform {\nrequired_providers {\n%s}\n}\n\n", strings.Join(requiredProviders, ""))}, lines...) - } - - return strings.Join(lines, ""), nil -} - // UnitTest is a helper to force the acceptance testing harness to run in the // normal unit test suite. This should only be used for resource that don't // have any external dependencies. 
@@ -698,10 +759,6 @@ func UnitTest(t testing.T, c TestCase) { } func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - for _, m := range state.Modules { if len(m.Resources) > 0 { if v, ok := m.Resources[c.ResourceName]; ok { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go index 163bd3d668..f1e607f834 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -10,23 +10,17 @@ import ( tfjson "github.com/hashicorp/terraform-json" testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, factories map[string]func() (*schema.Provider, error), v5factories map[string]func() (tfprotov5.ProviderServer, error), v6factories map[string]func() (tfprotov6.ProviderServer, error), statePreDestroy *terraform.State) error { +func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, providers *providerFactories, statePreDestroy *terraform.State) error { t.Helper() err := runProviderCommand(ctx, t, func() error { return wd.Destroy(ctx) - }, wd, providerFactories{ - legacy: factories, - protov5: v5factories, - protov6: v6factories}) + }, wd, providers) if err != nil { return err } @@ -55,6 +49,12 @@ func 
runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest ctx = logging.TestTerraformPathContext(ctx, wd.GetHelper().TerraformExecPath()) ctx = logging.TestWorkingDirectoryContext(ctx, wd.GetHelper().WorkingDirectory()) + providers := &providerFactories{ + legacy: c.ProviderFactories, + protov5: c.ProtoV5ProviderFactories, + protov6: c.ProtoV6ProviderFactories, + } + defer func() { var statePreDestroy *terraform.State var err error @@ -64,10 +64,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { logging.HelperResourceError(ctx, "Error retrieving state, there may be dangling resources", @@ -78,7 +75,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest } if !stateIsEmpty(statePreDestroy) { - err := runPostTestDestroy(ctx, t, c, wd, c.ProviderFactories, c.ProtoV5ProviderFactories, c.ProtoV6ProviderFactories, statePreDestroy) + err := runPostTestDestroy(ctx, t, c, wd, providers, statePreDestroy) if err != nil { logging.HelperResourceError(ctx, "Error running post-test destroy, there may be dangling resources", @@ -91,36 +88,28 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest wd.Close() }() - providerCfg, err := testProviderConfig(c) - if err != nil { - logging.HelperResourceError(ctx, - "Error creating test provider configuration", - map[string]interface{}{logging.KeyError: err}, - ) - t.Fatalf("Error creating test provider configuration: %s", err.Error()) - } + if c.hasProviders(ctx) { + err := wd.SetConfig(ctx, c.providerConfig(ctx)) - err = wd.SetConfig(ctx, providerCfg) - if err != nil { - logging.HelperResourceError(ctx, - "Error setting test provider configuration", - map[string]interface{}{logging.KeyError: err}, - ) - t.Fatalf("Error setting test 
provider configuration: %s", err) - } - err = runProviderCommand(ctx, t, func() error { - return wd.Init(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - logging.HelperResourceError(ctx, - "Error running init", - map[string]interface{}{logging.KeyError: err}, - ) - t.Fatalf("Error running init: %s", err.Error()) - return + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error setting provider configuration: %s", err) + } + + err = runProviderCommand(ctx, t, func() error { + return wd.Init(ctx) + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error running init: %s", err.Error()) + } } logging.HelperResourceDebug(ctx, "Starting TestSteps") @@ -129,8 +118,9 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest // acts as default for import tests var appliedCfg string - for i, step := range c.Steps { - ctx = logging.TestStepNumberContext(ctx, i+1) + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // 1-based indexing for humans + ctx = logging.TestStepNumberContext(ctx, stepNumber) logging.HelperResourceDebug(ctx, "Starting TestStep") @@ -155,30 +145,103 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest logging.HelperResourceDebug(ctx, "Called TestStep SkipFunc") if skip { - t.Logf("Skipping step %d/%d due to SkipFunc", i+1, len(c.Steps)) + t.Logf("Skipping step %d/%d due to SkipFunc", stepNumber, len(c.Steps)) logging.HelperResourceWarn(ctx, "Skipping TestStep due to SkipFunc") continue } } + if step.Config != "" && !step.Destroy && len(step.Taint) > 0 { + var state *terraform.State + + err := runProviderCommand(ctx, 
t, func() error { + var err error + + state, err = getState(ctx, t, wd) + + if err != nil { + return err + } + + return nil + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error reading prior state before tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error reading prior state before tainting resources: %s", stepNumber, len(c.Steps), err) + } + + err = testStepTaint(ctx, state, step) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error tainting resources: %s", stepNumber, len(c.Steps), err) + } + } + + if step.hasProviders(ctx) { + providers = &providerFactories{ + legacy: sdkProviderFactories(c.ProviderFactories).merge(step.ProviderFactories), + protov5: protov5ProviderFactories(c.ProtoV5ProviderFactories).merge(step.ProtoV5ProviderFactories), + protov6: protov6ProviderFactories(c.ProtoV6ProviderFactories).merge(step.ProtoV6ProviderFactories), + } + + providerCfg := step.providerConfig(ctx) + + err := wd.SetConfig(ctx, providerCfg) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error setting test provider configuration: %s", stepNumber, len(c.Steps), err) + } + + err = runProviderCommand( + ctx, + t, + func() error { + return wd.Init(ctx) + }, + wd, + providers, + ) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d running init: %s", stepNumber, len(c.Steps), err.Error()) + return + } + } + if step.ImportState { logging.HelperResourceTrace(ctx, "TestStep is ImportState mode") - err := testStepNewImportState(ctx, t, c, helper, wd, step, appliedCfg) + err := testStepNewImportState(ctx, t, helper, 
wd, step, appliedCfg, providers) if step.ExpectError != nil { logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") if err == nil { logging.HelperResourceError(ctx, "Error running import: expected an error but got none", ) - t.Fatalf("Step %d/%d error running import: expected an error but got none", i+1, len(c.Steps)) + t.Fatalf("Step %d/%d error running import: expected an error but got none", stepNumber, len(c.Steps)) } if !step.ExpectError.MatchString(err.Error()) { logging.HelperResourceError(ctx, fmt.Sprintf("Error running import: expected an error with pattern (%s)", step.ExpectError.String()), map[string]interface{}{logging.KeyError: err}, ) - t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", i+1, len(c.Steps), step.ExpectError.String(), err) + t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", stepNumber, len(c.Steps), step.ExpectError.String(), err) } } else { if err != nil && c.ErrorCheck != nil { @@ -191,7 +254,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest "Error running import", map[string]interface{}{logging.KeyError: err}, ) - t.Fatalf("Step %d/%d error running import: %s", i+1, len(c.Steps), err) + t.Fatalf("Step %d/%d error running import: %s", stepNumber, len(c.Steps), err) } } @@ -203,7 +266,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest if step.Config != "" { logging.HelperResourceTrace(ctx, "TestStep is Config mode") - err := testStepNewConfig(ctx, t, c, wd, step) + err := testStepNewConfig(ctx, t, c, wd, step, providers) if step.ExpectError != nil { logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") @@ -211,14 +274,14 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest logging.HelperResourceError(ctx, "Expected an error but got none", ) - t.Fatalf("Step %d/%d, expected an error but got none", i+1, len(c.Steps)) + 
t.Fatalf("Step %d/%d, expected an error but got none", stepNumber, len(c.Steps)) } if !step.ExpectError.MatchString(err.Error()) { logging.HelperResourceError(ctx, fmt.Sprintf("Expected an error with pattern (%s)", step.ExpectError.String()), map[string]interface{}{logging.KeyError: err}, ) - t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err) + t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", stepNumber, len(c.Steps), err) } } else { if err != nil && c.ErrorCheck != nil { @@ -233,7 +296,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest "Unexpected error", map[string]interface{}{logging.KeyError: err}, ) - t.Fatalf("Step %d/%d error: %s", i+1, len(c.Steps), err) + t.Fatalf("Step %d/%d error: %s", stepNumber, len(c.Steps), err) } } @@ -244,7 +307,7 @@ func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest continue } - t.Fatalf("Step %d/%d, unsupported test mode", i+1, len(c.Steps)) + t.Fatalf("Step %d/%d, unsupported test mode", stepNumber, len(c.Steps)) } } @@ -277,7 +340,7 @@ func planIsEmpty(plan *tfjson.Plan) bool { return true } -func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState) error { +func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState, providers *providerFactories) error { t.Helper() spewConf := spew.NewDefaultConfig() @@ -291,11 +354,7 @@ func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest. // Temporarily set the config to a minimal provider config for the refresh // test. After the refresh we can reset it. 
- cfg, err := testProviderConfig(c) - if err != nil { - return err - } - err = wd.SetConfig(ctx, cfg) + err := wd.SetConfig(ctx, c.providerConfig(ctx)) if err != nil { t.Fatalf("Error setting import test config: %s", err) } @@ -317,10 +376,7 @@ func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest. return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go index 5a9df2bf5f..09d18c3645 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -13,30 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep) error { +func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { t.Helper() - if !step.Destroy { - var state *terraform.State - var err error - err = runProviderCommand(ctx, t, func() error { - state, err = getState(ctx, t, wd) - if err != nil { - return err - } - return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) - if err != nil { - return err - } - if err := testStepTaint(ctx, state, step); err != nil { - return fmt.Errorf("Error when tainting resources: %s", err) - } - } - err := wd.SetConfig(ctx, step.Config) if err != nil { return fmt.Errorf("Error setting config: %w", err) @@ -46,10 +25,7 @@ func testStepNewConfig(ctx context.Context, 
t testing.T, c TestCase, wd *plugint // failing to do this will result in data sources not being updated err = runProviderCommand(ctx, t, func() error { return wd.Refresh(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error running pre-apply refresh: %w", err) } @@ -66,10 +42,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return wd.CreateDestroyPlan(ctx) } return wd.CreatePlan(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error running pre-apply plan: %w", err) } @@ -84,10 +57,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving pre-apply state: %w", err) } @@ -95,10 +65,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint // Apply the diff, creating real resources err = runProviderCommand(ctx, t, func() error { return wd.Apply(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { if step.Destroy { return fmt.Errorf("Error running destroy: %w", err) @@ -114,10 +81,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving state after apply: %w", err) } @@ 
-148,10 +112,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return wd.CreateDestroyPlan(ctx) } return wd.CreatePlan(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error running post-apply plan: %w", err) } @@ -161,10 +122,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint var err error plan, err = wd.SavedPlan(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving post-apply plan: %w", err) } @@ -175,10 +133,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint var err error stdout, err = wd.SavedPlanRawStdout(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving formatted plan output: %w", err) } @@ -189,10 +144,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { err := runProviderCommand(ctx, t, func() error { return wd.Refresh(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error running post-apply refresh: %w", err) } @@ -204,10 +156,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return wd.CreateDestroyPlan(ctx) } return wd.CreatePlan(ctx) - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, 
providers) if err != nil { return fmt.Errorf("Error running second post-apply plan: %w", err) } @@ -216,10 +165,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint var err error plan, err = wd.SavedPlan(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving second post-apply plan: %w", err) } @@ -231,10 +177,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint var err error stdout, err = wd.SavedPlanRawStdout(ctx) return err - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return fmt.Errorf("Error retrieving formatted second plan output: %w", err) } @@ -257,10 +200,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { return err @@ -290,7 +230,7 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint // this fails. If refresh isn't read-only, then this will have // caught a different bug. 
if idRefreshCheck != nil { - if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck); err != nil { + if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck, providers); err != nil { return fmt.Errorf( "[ERROR] Test: ID-only test failed: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go index 1dc98143a6..ec61b055f3 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewImportState(ctx context.Context, t testing.T, c TestCase, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string) error { +func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string, providers *providerFactories) error { t.Helper() spewConf := spew.NewDefaultConfig() @@ -33,10 +33,7 @@ func testStepNewImportState(ctx context.Context, t testing.T, c TestCase, helper return err } return nil - }, wd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, wd, providers) if err != nil { t.Fatalf("Error getting state: %s", err) } @@ -100,20 +97,14 @@ func testStepNewImportState(ctx context.Context, t testing.T, c TestCase, helper err = runProviderCommand(ctx, t, func() error { return importWd.Init(ctx) - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, importWd, providers) if err != nil { t.Fatalf("Error running init: %s", err) } err = runProviderCommand(ctx, t, func() error { return 
importWd.Import(ctx, step.ResourceName, importId) - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, importWd, providers) if err != nil { return err } @@ -125,10 +116,7 @@ func testStepNewImportState(ctx context.Context, t testing.T, c TestCase, helper return err } return nil - }, importWd, providerFactories{ - legacy: c.ProviderFactories, - protov5: c.ProtoV5ProviderFactories, - protov6: c.ProtoV6ProviderFactories}) + }, importWd, providers) if err != nil { t.Fatalf("Error getting state: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go new file mode 100644 index 0000000000..35d4a9bf57 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go @@ -0,0 +1,48 @@ +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestStep and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. 
+func (s TestStep) providerConfig(_ context.Context) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + for name, externalProvider := range s.ExternalProviders { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go new file mode 100644 index 0000000000..e9239328c6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go @@ -0,0 +1,99 @@ +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// testStepValidateRequest contains data for the (TestStep).validate() method. +type testStepValidateRequest struct { + // StepNumber is the index of the TestStep in the TestCase.Steps. + StepNumber int + + // TestCaseHasProviders is enabled if the TestCase has set any of + // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, + // or ProviderFactories. 
+ TestCaseHasProviders bool +} + +// hasProviders returns true if the TestStep has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, or +// ProviderFactories fields. +func (s TestStep) hasProviders(_ context.Context) bool { + if len(s.ExternalProviders) > 0 { + return true + } + + if len(s.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(s.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(s.ProviderFactories) > 0 { + return true + } + + return false +} + +// validate ensures the TestStep is valid based on the following criteria: +// +// - Config or ImportState is set. +// - Providers are not specified (ExternalProviders, +// ProtoV5ProviderFactories, ProtoV6ProviderFactories, ProviderFactories) +// if specified at the TestCase level. +// - Providers are specified (ExternalProviders, ProtoV5ProviderFactories, +// ProtoV6ProviderFactories, ProviderFactories) if not specified at the +// TestCase level. +// - No overlapping ExternalProviders and ProviderFactories entries +// - ResourceName is not empty when ImportState is true, ImportStateIdFunc +// is not set, and ImportStateId is not set. 
+// +func (s TestStep) validate(ctx context.Context, req testStepValidateRequest) error { + ctx = logging.TestStepNumberContext(ctx, req.StepNumber) + + logging.HelperResourceTrace(ctx, "Validating TestStep") + + if s.Config == "" && !s.ImportState { + err := fmt.Errorf("TestStep missing Config or ImportState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range s.ExternalProviders { + if _, ok := s.ProviderFactories[name]; ok { + err := fmt.Errorf("TestStep provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + hasProviders := s.hasProviders(ctx) + + if req.TestCaseHasProviders && hasProviders { + err := fmt.Errorf("Providers must only be specified either at the TestCase or TestStep level") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if !req.TestCaseHasProviders && !hasProviders { + err := fmt.Errorf("Providers must be specified at the TestCase level or in all TestStep") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState { + if s.ImportStateId == "" && s.ImportStateIdFunc == nil && s.ResourceName == "" { + err := fmt.Errorf("TestStep ImportState must be specified with ImportStateId, ImportStateIdFunc, or ResourceName") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go index 2ba548f61d..03931b0247 100644 --- 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go @@ -31,6 +31,15 @@ const ( // The TestStep number of the test being executed. Starts at 1. KeyTestStepNumber = "test_step_number" + // The Terraform CLI logging level (TF_LOG) used for an acceptance test. + KeyTestTerraformLogLevel = "test_terraform_log_level" + + // The Terraform CLI logging level (TF_LOG_CORE) used for an acceptance test. + KeyTestTerraformLogCoreLevel = "test_terraform_log_core_level" + + // The Terraform CLI logging level (TF_LOG_PROVIDER) used for an acceptance test. + KeyTestTerraformLogProviderLevel = "test_terraform_log_provider_level" + // The path to the Terraform CLI logging file used for an acceptance test. // // This should match where the rest of the acceptance test logs are going diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go index ac75151a4e..6fd001a07d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go @@ -10,6 +10,24 @@ const ( // CLI installation, if installation is required. EnvTfAccTempDir = "TF_ACC_TEMP_DIR" + // Environment variable with level to filter Terraform logs during + // acceptance testing. This value sets TF_LOG in a safe manner when + // executing Terraform CLI commands, which would otherwise interfere + // with the testing framework using TF_LOG to set the Go standard library + // log package level. + // + // This value takes precedence over TF_LOG_CORE, due to precedence rules + // in the Terraform core code, so it is not possible to set this to a level + // and also TF_LOG_CORE=OFF. 
Use TF_LOG_CORE and TF_LOG_PROVIDER in that + // case instead. + // + // If not set, but TF_ACC_LOG_PATH or TF_LOG_PATH_MASK is set, it defaults + // to TRACE. If Terraform CLI is version 0.14 or earlier, it will have no + // separate affect from the TF_ACC_LOG_PATH or TF_LOG_PATH_MASK behavior, + // as those earlier versions of Terraform are unreliable with the logging + // level being outside TRACE. + EnvTfAccLog = "TF_ACC_LOG" + // Environment variable with path to save Terraform logs during acceptance // testing. This value sets TF_LOG_PATH in a safe manner when executing // Terraform CLI commands, which would otherwise be ignored since it could @@ -18,6 +36,17 @@ const ( // If TF_LOG_PATH_MASK is set, it takes precedence over this value. EnvTfAccLogPath = "TF_ACC_LOG_PATH" + // Environment variable with level to filter Terraform core logs during + // acceptance testing. This value sets TF_LOG_CORE separate from + // TF_LOG_PROVIDER when calling Terraform. + // + // This value has no affect when TF_ACC_LOG is set (which sets Terraform's + // TF_LOG), due to precedence rules in the Terraform core code. Use + // TF_LOG_CORE and TF_LOG_PROVIDER in that case instead. + // + // If not set, defaults to TF_ACC_LOG behaviors. + EnvTfLogCore = "TF_LOG_CORE" + // Environment variable with path containing the string %s, which is // replaced with the test name, to save separate Terraform logs during // acceptance testing. This value sets TF_LOG_PATH in a safe manner when @@ -27,6 +56,22 @@ const ( // Takes precedence over TF_ACC_LOG_PATH. EnvTfLogPathMask = "TF_LOG_PATH_MASK" + // Environment variable with level to filter Terraform provider logs during + // acceptance testing. This value sets TF_LOG_PROVIDER separate from + // TF_LOG_CORE. + // + // During testing, this only affects external providers whose logging goes + // through Terraform. The logging for the provider under test is controlled + // by the testing framework as it is running the provider code. 
Provider + // code using the Go standard library log package is controlled by TF_LOG + // for historical compatibility. + // + // This value takes precedence over TF_ACC_LOG for external provider logs, + // due to rules in the Terraform core code. + // + // If not set, defaults to TF_ACC_LOG behaviors. + EnvTfLogProvider = "TF_LOG_PROVIDER" + // Environment variable with acceptance testing Terraform CLI version to // download from releases.hashicorp.com, checksum verify, and install. The // value can be any valid Terraform CLI version, such as 1.1.6, with or diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go index d5aecd87bc..0411eae0a8 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go @@ -139,9 +139,94 @@ func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, return nil, fmt.Errorf("unable to disable terraform-exec provider verification: %w", err) } + tfAccLog := os.Getenv(EnvTfAccLog) + tfAccLogPath := os.Getenv(EnvTfAccLogPath) + tfLogCore := os.Getenv(EnvTfLogCore) + tfLogPathMask := os.Getenv(EnvTfLogPathMask) + tfLogProvider := os.Getenv(EnvTfLogProvider) + + if tfAccLog != "" && tfLogCore != "" { + err = fmt.Errorf( + "Invalid environment variable configuration. Cannot set both TF_ACC_LOG and TF_LOG_CORE. " + + "Use TF_LOG_CORE and TF_LOG_PROVIDER to separately control the Terraform CLI logging subsystems. 
" + + "To control the Go standard library log package for the provider under test, use TF_LOG.", + ) + logging.HelperResourceError(ctx, err.Error()) + return nil, err + } + + if tfAccLog != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfAccLog), + map[string]interface{}{logging.KeyTestTerraformLogLevel: tfAccLog}, + ) + + err := tf.SetLog(tfAccLog) + + if err != nil { + if !errors.As(err, new(*tfexec.ErrVersionMismatch)) { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec log level (%s): %w", tfAccLog, err) + } + + logging.HelperResourceWarn( + ctx, + fmt.Sprintf("Unable to set terraform-exec log level via %s environment variable, as Terraform CLI is version 0.14 or earlier. It will default to TRACE.", EnvTfAccLog), + map[string]interface{}{logging.KeyTestTerraformLogLevel: "TRACE"}, + ) + } + } + + if tfLogCore != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec core log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfLogCore), + map[string]interface{}{ + logging.KeyTestTerraformLogCoreLevel: tfLogCore, + }, + ) + + err := tf.SetLogCore(tfLogCore) + + if err != nil { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec core log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec core log level (%s): %w", tfLogCore, err) + } + } + + if tfLogProvider != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec provider log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfLogProvider), + map[string]interface{}{ + logging.KeyTestTerraformLogCoreLevel: tfLogProvider, + }, + ) + + err := 
tf.SetLogProvider(tfLogProvider) + + if err != nil { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec provider log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec provider log level (%s): %w", tfLogProvider, err) + } + } + var logPath, logPathEnvVar string - if tfAccLogPath := os.Getenv(EnvTfAccLogPath); tfAccLogPath != "" { + if tfAccLogPath != "" { logPath = tfAccLogPath logPathEnvVar = EnvTfAccLogPath } @@ -149,7 +234,7 @@ func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, // Similar to helper/logging.LogOutput() and // terraform-plugin-log/tfsdklog.RegisterTestSink(), the TF_LOG_PATH_MASK // environment variable should take precedence over TF_ACC_LOG_PATH. - if tfLogPathMask := os.Getenv(EnvTfLogPathMask); tfLogPathMask != "" { + if tfLogPathMask != "" { // Escape special characters which may appear if we have subtests testName := strings.Replace(t.Name(), "/", "__", -1) logPath = fmt.Sprintf(tfLogPathMask, testName) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go index 5309fcfb0a..1c15e6f5c1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -155,7 +155,9 @@ func (wd *WorkingDir) Init(ctx context.Context) error { logging.HelperResourceTrace(ctx, "Calling Terraform CLI init command") - err := wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo)) + // -upgrade=true is required for per-TestStep provider version changes + // e.g. 
TestTest_TestStep_ExternalProviders_DifferentVersions + err := wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Upgrade(true)) logging.HelperResourceTrace(ctx, "Called Terraform CLI init command") diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go index 424ad20c05..4c2fe03a07 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/access_context_manager_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go index a0456a9900..bb2c772d58 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/active_directory_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go index 2d2f7c3d70..b6f4c040cb 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/api_gateway_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go index f1bd1da9cb..5a05d876f3 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/apigee_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go index d8157ca3c9..92324ba8e4 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/artifact_registry_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go index 8e71c2cc07..f8320d5ed9 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/certificate_manager_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go index 53c35f08cb..e50b32bd6b 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/cloudfunctions2_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go index 7a896ecb47..176ca49aec 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/common_diff_suppress.go @@ -8,6 +8,7 @@ import ( "log" "net" "reflect" + "regexp" "strconv" "strings" "time" @@ -210,3 +211,29 @@ func compareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { // otherwise compare as self links return compareSelfLinkOrResourceName("", old, new, nil) } + +// Suppress diffs in below cases +// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" +// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" +func lastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if last := len(new) - 1; last >= 0 && new[last] == '/' { + new = new[:last] + } + + if last := len(old) - 1; last >= 0 && old[last] == '/' { + old = old[:last] + } + return new == old +} + +// Suppress diffs when the value read from api +// has the project 
number instead of the project name +func projectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + var a2, b2 string + reN := regexp.MustCompile("projects/\\d+") + re := regexp.MustCompile("projects/[^/]+") + replacement := []byte("projects/equal") + a2 = string(reN.ReplaceAll([]byte(old), replacement)) + b2 = string(re.ReplaceAll([]byte(new), replacement)) + return a2 == b2 +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go index fbe60029c1..cd92468ced 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/compute_instance_helpers.go @@ -1,4 +1,3 @@ -// package google import ( @@ -122,15 +121,20 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.ProvisioningModel = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "ProvisioningModel") } + if v, ok := original["instance_termination_action"]; ok { + scheduling.InstanceTerminationAction = v.(string) + scheduling.ForceSendFields = append(scheduling.ForceSendFields, "InstanceTerminationAction") + } return scheduling, nil } func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { schedulingMap := map[string]interface{}{ - "on_host_maintenance": resp.OnHostMaintenance, - "preemptible": resp.Preemptible, - "min_node_cpus": resp.MinNodeCpus, - "provisioning_model": resp.ProvisioningModel, + "on_host_maintenance": resp.OnHostMaintenance, + "preemptible": resp.Preemptible, + "min_node_cpus": resp.MinNodeCpus, + "provisioning_model": resp.ProvisioningModel, + "instance_termination_action": resp.InstanceTerminationAction, } if resp.AutomaticRestart != nil { @@ -175,6 +179,7 @@ func flattenIpv6AccessConfigs(ipv6AccessConfigs 
[]*compute.AccessConfig) []map[s "network_tier": ac.NetworkTier, } flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName + flattened[i]["external_ipv6"] = ac.ExternalIpv6 } return flattened } @@ -479,6 +484,10 @@ func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { return true } + if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { + return true + } + return false } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go index 90695db827..04c4cedbec 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/config.go @@ -67,7 +67,7 @@ type Formatter struct { // Borrowed logic from https://github.com/sirupsen/logrus/blob/master/json_formatter.go and https://github.com/t-tomalak/logrus-easy-formatter/blob/master/formatter.go func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { // Suppress logs if TF_LOG is not DEBUG or TRACE - // also suppress frequent transport spam + // Also suppress frequent transport spam if !logging.IsDebugOrHigher() || strings.Contains(entry.Message, "transport is closing") { return nil, nil } @@ -137,6 +137,7 @@ func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { // Config is the configuration structure used to instantiate the Google // provider. 
type Config struct { + DCLConfig AccessToken string Credentials string ImpersonateServiceAccount string @@ -253,26 +254,12 @@ type Config struct { StorageTransferBasePath string BigtableAdminBasePath string + // dcl + ContainerAwsBasePath string + ContainerAzureBasePath string + requestBatcherServiceUsage *RequestBatcher requestBatcherIam *RequestBatcher - - // start DCLBasePaths - // dataprocBasePath is implemented in mm - AssuredWorkloadsBasePath string - ClouddeployBasePath string - CloudResourceManagerBasePath string - ContainerAwsBasePath string - ContainerAzureBasePath string - DataplexBasePath string - EventarcBasePath string - FirebaserulesBasePath string - GkeHubBasePath string - NetworkConnectivityBasePath string - OrgPolicyBasePath string - RecaptchaEnterpriseBasePath string - ApikeysBasePath string - // CloudBuild WorkerPool uses a different endpoint (v1beta1) than any other CloudBuild resources - CloudBuildWorkerPoolBasePath string } const AccessApprovalBasePathKey = "AccessApproval" @@ -364,7 +351,6 @@ const ResourceManagerV3BasePathKey = "ResourceManagerV3" const ServiceNetworkingBasePathKey = "ServiceNetworking" const StorageTransferBasePathKey = "StorageTransfer" const BigtableAdminBasePathKey = "BigtableAdmin" -const GkeHubFeatureBasePathKey = "GkeHubFeature" const ContainerAwsBasePathKey = "ContainerAws" const ContainerAzureBasePathKey = "ContainerAzure" @@ -372,7 +358,7 @@ const ContainerAzureBasePathKey = "ContainerAzure" var DefaultBasePaths = map[string]string{ AccessApprovalBasePathKey: "https://accessapproval.googleapis.com/v1/", AccessContextManagerBasePathKey: "https://accesscontextmanager.googleapis.com/v1/", - ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1/", + ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1beta1/", ApiGatewayBasePathKey: "https://apigateway.googleapis.com/v1beta/", ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", AppEngineBasePathKey: 
"https://appengine.googleapis.com/v1/", @@ -406,7 +392,7 @@ var DefaultBasePaths = map[string]string{ DialogflowBasePathKey: "https://dialogflow.googleapis.com/v2/", DialogflowCXBasePathKey: "https://{{location}}-dialogflow.googleapis.com/v3/", DNSBasePathKey: "https://dns.googleapis.com/dns/v1beta2/", - DocumentAIBasePathKey: "https://documentai.googleapis.com/v1/", + DocumentAIBasePathKey: "https://{{location}}-documentai.googleapis.com/v1/", EssentialContactsBasePathKey: "https://essentialcontacts.googleapis.com/v1/", FilestoreBasePathKey: "https://file.googleapis.com/v1beta1/", FirebaseBasePathKey: "https://firebase.googleapis.com/v1beta1/", @@ -459,7 +445,6 @@ var DefaultBasePaths = map[string]string{ ServiceNetworkingBasePathKey: "https://servicenetworking.googleapis.com/v1/", StorageTransferBasePathKey: "https://storagetransfer.googleapis.com/v1/", BigtableAdminBasePathKey: "https://bigtableadmin.googleapis.com/v2/", - GkeHubFeatureBasePathKey: "https://gkehub.googleapis.com/v1beta/", ContainerAwsBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", ContainerAzureBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go index 9b9839740d..0df3958a7e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_fusion_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go index 40426a68e3..734fe2ac8f 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_dns_managed_zone.go @@ -26,6 +26,12 @@ func dataSourceDnsManagedZone() *schema.Resource { Computed: true, }, + "managed_zone_id": { + Type: schema.TypeInt, + Computed: true, + Description: `Unique identifier for the resource; defined by the server.`, + }, + "name_servers": { Type: schema.TypeList, Computed: true, @@ -69,18 +75,21 @@ func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) erro return handleNotFoundError(err, d, fmt.Sprintf("dataSourceDnsManagedZone %q", name)) } - if err := d.Set("name_servers", zone.NameServers); err != nil { - return fmt.Errorf("Error setting name_servers: %s", err) + if err := d.Set("dns_name", zone.DnsName); err != nil { + return fmt.Errorf("Error setting dns_name: %s", err) } if err := d.Set("name", zone.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } - if err := d.Set("dns_name", zone.DnsName); err != nil { - return fmt.Errorf("Error setting dns_name: %s", err) - } if err := d.Set("description", zone.Description); err != nil { return fmt.Errorf("Error setting description: %s", err) } + if err := d.Set("managed_zone_id", zone.Id); err != nil { + return fmt.Errorf("Error setting managed_zone_id: %s", err) + } + if err := d.Set("name_servers", zone.NameServers); err != nil { + return fmt.Errorf("Error setting name_servers: %s", err) + } if err := d.Set("visibility", zone.Visibility); err != nil { return fmt.Errorf("Error setting 
visibility: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go index cee2d595e7..79887ff109 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_iam_policy.go @@ -16,14 +16,14 @@ import ( // to express a Google Cloud IAM policy in a data resource. This is an example // of how the schema would be used in a config: // -// data "google_iam_policy" "admin" { -// binding { -// role = "roles/storage.objectViewer" -// members = [ -// "user:evanbrown@google.com", -// ] -// } -// } +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } func dataSourceGoogleIamPolicy() *schema.Resource { return &schema.Resource{ Read: dataSourceGoogleIamPolicyRead, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go index ffc9cbde8e..49ad537f61 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_project.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -26,7 +27,7 @@ func datasourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error } else { project, err := getProject(d, config) if err != nil { - return err + return fmt.Errorf("no project value set. 
`project_id` must be set at the resource level, or a default `project` value must be specified on the provider") } d.SetId(fmt.Sprintf("projects/%s", project)) } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt.go new file mode 100644 index 0000000000..a70f4d0687 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_google_service_account_jwt.go @@ -0,0 +1,74 @@ +package google + +import ( + "fmt" + "strings" + + iamcredentials "google.golang.org/api/iamcredentials/v1" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceGoogleServiceAccountJwt() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountJwtRead, + Schema: map[string]*schema.Schema{ + "payload": { + Type: schema.TypeString, + Required: true, + Description: `A JSON-encoded JWT claims set that will be included in the signed JWT.`, + }, + "target_service_account": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRegexp("(" + strings.Join(PossibleServiceAccountNames, "|") + ")"), + }, + "delegates": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateRegexp(ServiceAccountLinkRegex), + }, + }, + "jwt": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleServiceAccountJwtRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + userAgent, err := generateUserAgentString(d, config.userAgent) + + if err != nil { + return err + } + + name := fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) + + jwtRequest := &iamcredentials.SignJwtRequest{ + Payload: d.Get("payload").(string), + Delegates: 
convertStringSet(d.Get("delegates").(*schema.Set)), + } + + service := config.NewIamCredentialsClient(userAgent) + + jwtResponse, err := service.Projects.ServiceAccounts.SignJwt(name, jwtRequest).Do() + + if err != nil { + return fmt.Errorf("error calling iamcredentials.SignJwt: %w", err) + } + + d.SetId(name) + + if err := d.Set("jwt", jwtResponse.SignedJwt); err != nil { + return fmt.Errorf("error setting jwt attribute: %w", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go index ec7135732a..d6d3f530f4 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_sql_backup_run.go @@ -29,6 +29,12 @@ func dataSourceSqlBackupRun() *schema.Resource { Computed: true, Description: `Location of the backups.`, }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Project ID of the project that contains the instance.`, + }, "start_time": { Type: schema.TypeString, Computed: true, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go index 938c72f8f3..445e1af8fe 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/data_source_storage_object_signed_url.go @@ -14,12 +14,11 @@ import ( "log" "net/url" "os" + "sort" "strconv" "strings" "time" - "sort" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -170,10 +169,9 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err // loadJwtConfig looks for credentials json in the following places, // in order of preference: -// 1. `credentials` attribute of the datasource -// 2. `credentials` attribute in the provider definition. -// 3. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 1. `credentials` attribute of the datasource +// 2. `credentials` attribute in the provider definition. +// 3. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable. func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { config := meta.(*Config) @@ -250,7 +248,6 @@ type UrlData struct { // ------------------- // GET // -// // 1388534400 // bucket/objectname // ------------------- diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go index e1bdc4cc48..4f5b190a54 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dataproc_metastore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go index b07661e853..67a7f53c47 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/datastore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go index de972457f7..b6a8eb6e19 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/dialogflow_cx_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go index 6b3983e171..abb26b2816 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/error_retry_predicates.go @@ -97,7 +97,7 @@ func isConnectionResetNetworkError(err error) (bool, string) { // Retry 409s because some APIs like Cloud SQL throw a 409 if concurrent calls // are being made. 
// -//The only way right now to determine it is a retryable 409 due to +// The only way right now to determine it is a retryable 409 due to // concurrent calls is to look at the contents of the error message. // See https://github.com/hashicorp/terraform-provider-google/issues/3279 func is409OperationInProgressError(err error) (bool, string) { @@ -419,3 +419,14 @@ func isBigTableRetryableError(err error) (bool, string) { return false, "" } + +// Concurrent Apigee operations can fail with a 400 error +func isApigeeRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(strings.ToLower(gerr.Body), "the resource is locked by another operation") { + return true, "Waiting for other concurrent operations to finish" + } + } + + return false, "" +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go index 4ede494366..4131fe8686 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/filestore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go index 69e45d6d24..c6b778a6de 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firebase_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go index 8a576ff58d..4f1fab3745 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/firestore_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go index 629f895574..4b7b600c52 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/game_services_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go index 61f17bce60..924b86a764 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/gke_hub_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go index 2cf4375214..e2ffa59532 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam2_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go index 51bf6e405d..430f6c2598 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_access_context_manager_access_policy.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go index fd5a51ca48..faa8d484d2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go index dd99d31ada..2a394e8026 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_api_config.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go index 8b307405f2..748ccb58a2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_api_gateway_gateway.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go index 7dbe271c0f..e81592fb69 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_apigee_environment.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go index 11abc48702..3b72b4bc2a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_artifact_registry_repository.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go index 36a6a2228a..c6d2c8e252 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_beta_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection.go new file mode 100644 index 0000000000..ce4ba86903 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_connection.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var BigqueryConnectionConnectionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "connection_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type BigqueryConnectionConnectionIamUpdater struct { + project string + location string + connectionId string + d TerraformResourceData + Config *Config +} + +func BigqueryConnectionConnectionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + 
if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("connection_id"); ok { + values["connection_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("connection_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryConnectionConnectionIamUpdater{ + project: values["project"], + location: values["location"], + connectionId: values["connection_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("connection_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting connection_id: %s", err) + } + + return u, nil +} + +func BigqueryConnectionConnectionIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { 
+ return err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryConnectionConnectionIamUpdater{ + project: values["project"], + location: values["location"], + connectionId: values["connection_id"], + d: d, + Config: config, + } + if err := d.Set("connection_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting connection_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyConnectionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyConnectionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error 
setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) qualifyConnectionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BigqueryConnectionBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId) +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigqueryconnection-connection-%s", u.GetResourceId()) +} + +func (u *BigqueryConnectionConnectionIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigqueryconnection connection %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go index eababb290d..0f176a39c5 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_bigquery_table.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go index 00bb8b79c8..1f69534471 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_binary_authorization_attestor.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go index 49f592e93b..26d770bf3d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_run_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue.go new file mode 100644 index 0000000000..651b8a9a1e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloud_tasks_queue.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var CloudTasksQueueIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type CloudTasksQueueIamUpdater struct { + project string + location string + name string + d TerraformResourceData + Config *Config +} + +func CloudTasksQueueIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err 
!= nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudTasksQueueIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudTasksQueueIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudTasksQueueIamUpdater{ + project: values["project"], + location: 
values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudTasksQueueIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyQueueUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudTasksQueueIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyQueueUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudTasksQueueIamUpdater) qualifyQueueUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{CloudTasksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudTasksQueueIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name) +} + +func (u *CloudTasksQueueIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudtasks-queue-%s", u.GetResourceId()) +} + +func (u *CloudTasksQueueIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudtasks queue %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go index e607f6060b..652c57cc52 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions2_function.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go index 1cc5c3d5fa..0f7c69361b 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudfunctions_function.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry.go new file mode 100644 index 0000000000..7e980839ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_cloudiot_registry.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var CloudIotDeviceRegistryIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type CloudIotDeviceRegistryIamUpdater struct { + project string + region string + name string + d TerraformResourceData + Config *Config +} + +func CloudIotDeviceRegistryIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := 
d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := getRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudIotDeviceRegistryIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudIotDeviceRegistryIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := getRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudIotDeviceRegistryIamUpdater{ + project: values["project"], + 
region: values["region"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDeviceRegistryUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDeviceRegistryUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) qualifyDeviceRegistryUrl(methodIdentifier string) 
(string, error) { + urlTemplate := fmt.Sprintf("{{CloudIotBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name) +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudiot-deviceregistry-%s", u.GetResourceId()) +} + +func (u *CloudIotDeviceRegistryIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudiot deviceregistry %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket.go new file mode 100644 index 0000000000..ae60dbdb42 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_bucket.go @@ -0,0 +1,199 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var ComputeBackendBucketIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type ComputeBackendBucketIamUpdater struct { + project string + name string + d TerraformResourceData + Config *Config +} + +func ComputeBackendBucketIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeBackendBucketIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ComputeBackendBucketIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + 
project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeBackendBucketIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeBackendBucketIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyBackendBucketUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeBackendBucketIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyBackendBucketUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + 
_, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeBackendBucketIamUpdater) qualifyBackendBucketUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/backendBuckets/%s", u.project, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeBackendBucketIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/global/backendBuckets/%s", u.project, u.name) +} + +func (u *ComputeBackendBucketIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-backendbucket-%s", u.GetResourceId()) +} + +func (u *ComputeBackendBucketIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute backendbucket %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go index 843825a900..064fabfb81 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_backend_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go index 1589bd95ff..77b791525e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_disk.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go index dd9d7de81d..f75c49ac57 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_image.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go index 5218a71e1b..4e3c46c94b 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_instance.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go index 5275396cc6..a80307db60 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_machine_image.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go index d015340ffd..1f457544d5 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_backend_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go index 94b383fcc4..3d54d8b16e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_region_disk.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot.go new file mode 100644 index 0000000000..4247803521 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_snapshot.go @@ -0,0 +1,199 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var ComputeSnapshotIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type ComputeSnapshotIamUpdater struct { + project string + name string + d TerraformResourceData + Config *Config +} + +func ComputeSnapshotIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := 
d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSnapshotIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ComputeSnapshotIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSnapshotIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeSnapshotIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifySnapshotUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, 
userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeSnapshotIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifySnapshotUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeSnapshotIamUpdater) qualifySnapshotUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeSnapshotIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name) +} + +func (u *ComputeSnapshotIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-snapshot-%s", u.GetResourceId()) +} + +func (u *ComputeSnapshotIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute snapshot %q", u.GetResourceId()) +} diff --git 
a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go index 7c18f1ee7f..18852413ad 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_compute_subnetwork.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go index 1293c5d0b5..a90ed7a0a2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_entry_group.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go index bafde46023..c9d3a8acc9 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_policy_tag.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go index 0385551589..5ce5a0a398 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_tag_template.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go index 55c1b600f8..4fb9008faa 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_data_catalog_taxonomy.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy.go new file mode 100644 index 0000000000..f0d5abb3ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_autoscaling_policy.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var DataprocAutoscalingPolicyIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "policy_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type DataprocAutoscalingPolicyIamUpdater struct { + project string + location string + policyId string + d TerraformResourceData + Config *Config +} + +func DataprocAutoscalingPolicyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, 
config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("policy_id"); ok { + values["policy_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("policy_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocAutoscalingPolicyIamUpdater{ + project: values["project"], + location: values["location"], + policyId: values["policy_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("policy_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting policy_id: %s", err) + } + + return u, nil +} + +func DataprocAutoscalingPolicyIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return 
err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocAutoscalingPolicyIamUpdater{ + project: values["project"], + location: values["location"], + policyId: values["policy_id"], + d: d, + Config: config, + } + if err := d.Set("policy_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting policy_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAutoscalingPolicyUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAutoscalingPolicyUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for 
%s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) qualifyAutoscalingPolicyUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId) +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataproc-autoscalingpolicy-%s", u.GetResourceId()) +} + +func (u *DataprocAutoscalingPolicyIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataproc autoscalingpolicy %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation.go new file mode 100644 index 0000000000..18dcf61d07 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_federation.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var DataprocMetastoreFederationIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "federation_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type DataprocMetastoreFederationIamUpdater struct { + project string + location string + federationId string + d TerraformResourceData + Config *Config +} + +func DataprocMetastoreFederationIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("federation_id"); ok { + values["federation_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/federations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("federation_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreFederationIamUpdater{ + project: 
values["project"], + location: values["location"], + federationId: values["federation_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("federation_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting federation_id: %s", err) + } + + return u, nil +} + +func DataprocMetastoreFederationIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/federations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreFederationIamUpdater{ + project: values["project"], + location: values["location"], + federationId: values["federation_id"], + d: d, + Config: config, + } + if err := d.Set("federation_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting federation_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocMetastoreFederationIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyFederationUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + 
if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocMetastoreFederationIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyFederationUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocMetastoreFederationIamUpdater) qualifyFederationUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocMetastoreBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/federations/%s", u.project, u.location, u.federationId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocMetastoreFederationIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/federations/%s", u.project, u.location, u.federationId) +} + +func (u *DataprocMetastoreFederationIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataprocmetastore-federation-%s", u.GetResourceId()) +} + +func (u *DataprocMetastoreFederationIamUpdater) DescribeResource() string { + return 
fmt.Sprintf("dataprocmetastore federation %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service.go new file mode 100644 index 0000000000..bcc3bc91b2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_dataproc_metastore_service.go @@ -0,0 +1,223 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var DataprocMetastoreServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "service_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type DataprocMetastoreServiceIamUpdater struct { + project string + location string + serviceId string + d TerraformResourceData + Config *Config +} + +func DataprocMetastoreServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := 
d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := getLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("service_id"); ok { + values["service_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreServiceIamUpdater{ + project: values["project"], + location: values["location"], + serviceId: values["service_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("service_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting service_id: %s", err) + } + + return u, nil +} + +func DataprocMetastoreServiceIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := getLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + 
u := &DataprocMetastoreServiceIamUpdater{ + project: values["project"], + location: values["location"], + serviceId: values["service_id"], + d: d, + Config: config, + } + if err := d.Set("service_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting service_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocMetastoreServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocMetastoreServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u 
*DataprocMetastoreServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocMetastoreBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocMetastoreServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId) +} + +func (u *DataprocMetastoreServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataprocmetastore-service-%s", u.GetResourceId()) +} + +func (u *DataprocMetastoreServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataprocmetastore service %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go index bf8d859264..a26da84ce3 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go index e95202a874..3ac5cfc429 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_endpoints_service_consumers.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership.go new file mode 100644 index 0000000000..d916f4590c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_gke_hub_membership.go @@ -0,0 +1,199 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var GKEHubMembershipIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "membership_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, +} + +type GKEHubMembershipIamUpdater struct { + project string + membershipId string + d TerraformResourceData + Config *Config +} + +func GKEHubMembershipIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] 
= project + if v, ok := d.GetOk("membership_id"); ok { + values["membership_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("membership_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHubMembershipIamUpdater{ + project: values["project"], + membershipId: values["membership_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("membership_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting membership_id: %s", err) + } + + return u, nil +} + +func GKEHubMembershipIdParseFunc(d *schema.ResourceData, config *Config) error { + values := make(map[string]string) + + project, _ := getProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHubMembershipIamUpdater{ + project: values["project"], + membershipId: values["membership_id"], + d: d, + Config: config, + } + if err := d.Set("membership_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting membership_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *GKEHubMembershipIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyMembershipUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := getProject(u.d, u.Config) + if err != nil { + return 
nil, err + } + var obj map[string]interface{} + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return nil, err + } + + policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *GKEHubMembershipIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyMembershipUrl("setIamPolicy") + if err != nil { + return err + } + project, err := getProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) + if err != nil { + return err + } + + _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *GKEHubMembershipIamUpdater) qualifyMembershipUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{GKEHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId), methodIdentifier) + url, err := replaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *GKEHubMembershipIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId) +} + +func (u *GKEHubMembershipIamUpdater) GetMutexKey() string { + return 
fmt.Sprintf("iam-gkehub-membership-%s", u.GetResourceId()) +} + +func (u *GKEHubMembershipIamUpdater) DescribeResource() string { + return fmt.Sprintf("gkehub membership %q", u.GetResourceId()) +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go index 6a0c83c5c9..8c5dfcfc5c 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_healthcare_consent_store.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go index 3ad94c8bdf..c4340d29c5 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go index 9168020cfe..65f567009e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_app_engine_version.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go index 618ce5c72a..0ce966c0fc 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go index dabc9c4d4b..b9bf4ba1ff 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_tunnel_instance.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go index 994c2f4816..84abcba2d0 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go index 97900452a7..72886bfe28 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_backend_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go index 723bc87a01..f9540ecb33 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_app_engine.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go index 2ebd9690d1..fadfe7226a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_iap_web_type_compute.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go index 7a7e42f587..b92d80e3c8 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_instance.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go index b5e1564939..0306ea8cfd 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_notebooks_runtime.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go index 86e9afecc5..4e6ac1aca7 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_ca_pool.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go index acefe6605b..b8f0ffe31d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_privateca_certificate_template.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go index a4b9d4a76e..5d87c9b0c0 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_pubsub_topic.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go index 13c7e8ff79..6839ffc32b 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_runtimeconfig_config.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go index b4a453b12e..34feb85f8d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_secret_manager_secret.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go index bc9932e7e8..df172f7af0 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_namespace.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go index 4db2c80f25..afbd6a0fa7 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_service_directory_service.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go index ad353d07d7..5740368446 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_sourcerepo_repository.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go index d53f8568bf..ea23f944ea 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_storage_bucket.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go index f424b2bcd8..1091b1afef 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_key.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go index 59c34056ec..01de025826 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/iam_tags_tag_value.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go index 11ec84a8e3..21b17dc896 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/image.go @@ -26,7 +26,7 @@ var ( resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/[a-z0-9]+/projects/(%s)/global/images/(%s)", ProjectRegex, resolveImageImageRegex)) windowsSqlImage = regexp.MustCompile("^sql-(?:server-)?([0-9]{4})-([a-z]+)-windows-(?:server-)?([0-9]{4})(?:-r([0-9]+))?-dc-v[0-9]+$") - canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)-") + canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)(?:.*(arm64))?.*$") cosLtsImage = regexp.MustCompile("^cos-([0-9]+)-") ) @@ -78,13 +78,18 @@ func sanityTestRegexMatches(expected int, got []string, regexType, name string) // If it's in the form global/images/{image}, return it // If it's in the form global/images/family/{family}, return it // If it's in the form family/{family}, check if it's a family in the current project. If it is, return it as global/images/family/{family}. 
-// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. +// +// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. +// // If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}. -// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. +// +// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. +// // If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}. -// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. -// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. -// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family} +// +// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. +// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. +// If not, check if it could be a GCP-provided family, and if it exists. 
If it does, return it as projects/{project}/global/images/family/{family} func resolveImage(c *Config, project, name, userAgent string) (string, error) { var builtInProject string for k, v := range imageMap { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go index 056bd6aa87..a0ebc7fb7f 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/memcache_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go index d4ffc0f92c..923108f9aa 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/ml_engine_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go index f31a6d4fe7..4f2644a15f 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_management_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go index 5e09e88fac..b7c09a4ce1 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/network_services_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go index 41b23d5ab3..643f15d319 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/node_config.go @@ -182,6 +182,7 @@ func schemaNodeConfig() *schema.Schema { "min_cpu_platform": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.`, }, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go index fc4c8668e5..18213a3474 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/notebooks_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_ca_utils.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_ca_utils.go new file mode 100644 index 0000000000..6cc7b6d72b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_ca_utils.go @@ -0,0 +1,221 @@ +package google + +import ( + "fmt" + "log" + "math/rand" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// CA related utilities. + +func enableCA(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + enableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Enabling CertificateAuthority") + + res, err := sendRequest(config, "POST", billingProject, enableUrl, userAgent, nil) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err) + } + return nil +} + +func disableCA(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Disabling CA") + + dRes, err := sendRequest(config, "POST", billingProject, 
disableUrl, userAgent, nil) + if err != nil { + return fmt.Errorf("Error disabling CA: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, dRes, &opRes, project, "Disabling CA", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("Error waiting to disable CA: %s", err) + } + return nil +} + +func activateSubCAWithThirdPartyIssuer(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + // 1. prepare parameters + signedCACert := d.Get("pem_ca_certificate").(string) + + sc, ok := d.GetOk("subordinate_config") + if !ok { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + c := sc.([]interface{}) + if len(c) == 0 || c[0] == nil { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + chain, ok := c[0].(map[string]interface{})["pem_issuer_chain"] + if !ok { + return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") + } + issuerChain := chain.([]interface{}) + if len(issuerChain) == 0 || issuerChain[0] == nil { + return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") + } + pc := issuerChain[0].(map[string]interface{})["pem_certificates"].([]interface{}) + pemIssuerChain := make([]string, 0, len(pc)) + for _, pem := range pc { + pemIssuerChain = append(pemIssuerChain, pem.(string)) + } + + // 2. 
activate CA + activateObj := make(map[string]interface{}) + activateObj["pemCaCertificate"] = signedCACert + activateObj["subordinateConfig"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"].(map[string]interface{})["pemCertificates"] = pemIssuerChain + + activateUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") + if err != nil { + return err + } + + log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) + res, err := sendRequest(config, "POST", billingProject, activateUrl, userAgent, activateObj) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Activating CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to activate CertificateAuthority: %s", err) + } + return nil +} + +func activateSubCAWithFirstPartyIssuer(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + // 1. get issuer + sc, ok := d.GetOk("subordinate_config") + if !ok { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + c := sc.([]interface{}) + if len(c) == 0 || c[0] == nil { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + ca, ok := c[0].(map[string]interface{})["certificate_authority"] + if !ok { + return fmt.Errorf("subordinate_config.certificate_authority is required to activate subordinate CA with first party issuer") + } + issuer := ca.(string) + + // 2. 
fetch CSR + fetchCSRUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:fetch") + if err != nil { + return err + } + res, err := sendRequest(config, "GET", billingProject, fetchCSRUrl, userAgent, nil) + if err != nil { + return fmt.Errorf("failed to fetch CSR: %v", err) + } + csr := res["pemCsr"] + + // 3. sign the CSR with first party issuer + genCertId := func() string { + currentTime := time.Now() + dateStr := currentTime.Format("20060102") + + rand.Seed(time.Now().UnixNano()) + const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + rand1 := make([]byte, 3) + for i := range rand1 { + rand1[i] = letters[rand.Intn(len(letters))] + } + rand2 := make([]byte, 3) + for i := range rand2 { + rand2[i] = letters[rand.Intn(len(letters))] + } + return fmt.Sprintf("subordinate-%v-%v-%v", dateStr, string(rand1), string(rand2)) + } + + // parseCAName parses a CA name and return the CaPool name and CaId. 
+ parseCAName := func(n string) (string, string, error) { + parts := regexp.MustCompile(`(projects/[a-z0-9-]+/locations/[a-z0-9-]+/caPools/[a-zA-Z0-9-]+)/certificateAuthorities/([a-zA-Z0-9-]+)`).FindStringSubmatch(n) + if len(parts) != 3 { + return "", "", fmt.Errorf("failed to parse CA name: %v, parts: %v", n, parts) + } + return parts[1], parts[2], err + } + + obj := make(map[string]interface{}) + obj["pemCsr"] = csr + obj["lifetime"] = d.Get("lifetime") + + certId := genCertId() + poolName, issuerId, err := parseCAName(issuer) + if err != nil { + return err + } + + PrivatecaBasePath, err := replaceVars(d, config, "{{PrivatecaBasePath}}") + if err != nil { + return err + } + signUrl := fmt.Sprintf("%v%v/certificates?certificateId=%v", PrivatecaBasePath, poolName, certId) + signUrl, err = addQueryParams(signUrl, map[string]string{"issuingCertificateAuthorityId": issuerId}) + if err != nil { + return err + } + + log.Printf("[DEBUG] Signing CA Certificate: %#v", obj) + res, err = sendRequestWithTimeout(config, "POST", billingProject, signUrl, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Certificate: %s", err) + } + signedCACert := res["pemCertificate"] + + // 4. activate sub CA with the signed CA cert. 
+ activateObj := make(map[string]interface{}) + activateObj["pemCaCertificate"] = signedCACert + activateObj["subordinateConfig"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["certificateAuthority"] = issuer + + activateUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") + if err != nil { + return err + } + + log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) + res, err = sendRequest(config, "POST", billingProject, activateUrl, userAgent, activateObj) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = privatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to activate CertificateAuthority: %s", err) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go index 9b950536a5..b3a8a13259 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/privateca_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go index 175cc4cf93..052801989c 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider.go @@ -797,22 +797,8 @@ func Provider() *schema.Provider { BigtableAdminCustomEndpointEntryKey: BigtableAdminCustomEndpointEntry, // dcl - AssuredWorkloadsEndpointEntryKey: AssuredWorkloadsEndpointEntry, - ClouddeployEndpointEntryKey: ClouddeployEndpointEntry, - CloudResourceManagerEndpointEntryKey: CloudResourceManagerEndpointEntry, - DataplexEndpointEntryKey: DataplexEndpointEntry, - EventarcEndpointEntryKey: EventarcEndpointEntry, - FirebaserulesEndpointEntryKey: FirebaserulesEndpointEntry, - GkeHubFeatureCustomEndpointEntryKey: GkeHubFeatureCustomEndpointEntry, - NetworkConnectivityEndpointEntryKey: NetworkConnectivityEndpointEntry, - OrgPolicyEndpointEntryKey: OrgPolicyEndpointEntry, - PrivatecaCertificateTemplateEndpointEntryKey: PrivatecaCertificateTemplateCustomEndpointEntry, - RecaptchaEnterpriseEndpointEntryKey: RecaptchaEnterpriseEndpointEntry, - ContainerAwsCustomEndpointEntryKey: ContainerAwsCustomEndpointEntry, - ContainerAzureCustomEndpointEntryKey: ContainerAzureCustomEndpointEntry, - ApikeysEndpointEntryKey: ApikeysEndpointEntry, - - CloudBuildWorkerPoolEndpointEntryKey: CloudBuildWorkerPoolEndpointEntry, + ContainerAwsCustomEndpointEntryKey: ContainerAwsCustomEndpointEntry, + ContainerAzureCustomEndpointEntryKey: ContainerAzureCustomEndpointEntry, }, ProviderMetaSchema: map[string]*schema.Schema{ @@ -918,6 +904,7 @@ func Provider() *schema.Provider { "google_service_account": dataSourceGoogleServiceAccount(), "google_service_account_access_token": 
dataSourceGoogleServiceAccountAccessToken(), "google_service_account_id_token": dataSourceGoogleServiceAccountIdToken(), + "google_service_account_jwt": dataSourceGoogleServiceAccountJwt(), "google_service_account_key": dataSourceGoogleServiceAccountKey(), "google_sourcerepo_repository": dataSourceGoogleSourceRepoRepository(), "google_spanner_instance": dataSourceSpannerInstance(), @@ -944,12 +931,14 @@ func Provider() *schema.Provider { return providerConfigure(ctx, d, provider) } + configureDCLProvider(provider) + return provider } -// Generated resources: 252 -// Generated IAM resources: 141 -// Total generated resources: 393 +// Generated resources: 256 +// Generated IAM resources: 168 +// Total generated resources: 424 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -972,6 +961,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_access_context_manager_service_perimeters": resourceAccessContextManagerServicePerimeters(), "google_access_context_manager_service_perimeter_resource": resourceAccessContextManagerServicePerimeterResource(), "google_access_context_manager_gcp_user_access_binding": resourceAccessContextManagerGcpUserAccessBinding(), + "google_active_directory_peering": resourceActiveDirectoryPeering(), "google_active_directory_domain": resourceActiveDirectoryDomain(), "google_active_directory_domain_trust": resourceActiveDirectoryDomainTrust(), "google_api_gateway_api": resourceApiGatewayApi(), @@ -1015,6 +1005,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_bigquery_table_iam_policy": ResourceIamPolicy(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), "google_bigquery_routine": resourceBigQueryRoutine(), "google_bigquery_connection": resourceBigqueryConnectionConnection(), + "google_bigquery_connection_iam_binding": ResourceIamBinding(BigqueryConnectionConnectionIamSchema, 
BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_connection_iam_member": ResourceIamMember(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_connection_iam_policy": ResourceIamPolicy(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), "google_bigquery_data_transfer_config": resourceBigqueryDataTransferConfig(), "google_bigquery_reservation": resourceBigqueryReservationReservation(), "google_bigtable_app_profile": resourceBigtableAppProfile(), @@ -1026,6 +1019,8 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_binary_authorization_policy": resourceBinaryAuthorizationPolicy(), "google_certificate_manager_dns_authorization": resourceCertificateManagerDnsAuthorization(), "google_certificate_manager_certificate": resourceCertificateManagerCertificate(), + "google_certificate_manager_certificate_map": resourceCertificateManagerCertificateMap(), + "google_certificate_manager_certificate_map_entry": resourceCertificateManagerCertificateMapEntry(), "google_cloud_asset_project_feed": resourceCloudAssetProjectFeed(), "google_cloud_asset_folder_feed": resourceCloudAssetFolderFeed(), "google_cloud_asset_organization_feed": resourceCloudAssetOrganizationFeed(), @@ -1040,6 +1035,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_cloud_identity_group": resourceCloudIdentityGroup(), "google_cloud_identity_group_membership": resourceCloudIdentityGroupMembership(), "google_cloudiot_registry": resourceCloudIotDeviceRegistry(), + "google_cloudiot_registry_iam_binding": ResourceIamBinding(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), + "google_cloudiot_registry_iam_member": ResourceIamMember(CloudIotDeviceRegistryIamSchema, 
CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), + "google_cloudiot_registry_iam_policy": ResourceIamPolicy(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), "google_cloudiot_device": resourceCloudIotDevice(), "google_cloud_run_domain_mapping": resourceCloudRunDomainMapping(), "google_cloud_run_service": resourceCloudRunService(), @@ -1048,9 +1046,15 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_cloud_run_service_iam_policy": ResourceIamPolicy(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), "google_cloud_scheduler_job": resourceCloudSchedulerJob(), "google_cloud_tasks_queue": resourceCloudTasksQueue(), + "google_cloud_tasks_queue_iam_binding": ResourceIamBinding(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), + "google_cloud_tasks_queue_iam_member": ResourceIamMember(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), + "google_cloud_tasks_queue_iam_policy": ResourceIamPolicy(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), "google_compute_address": resourceComputeAddress(), "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_backend_bucket": resourceComputeBackendBucket(), + "google_compute_backend_bucket_iam_binding": ResourceIamBinding(ComputeBackendBucketIamSchema, ComputeBackendBucketIamUpdaterProducer, ComputeBackendBucketIdParseFunc), + "google_compute_backend_bucket_iam_member": ResourceIamMember(ComputeBackendBucketIamSchema, ComputeBackendBucketIamUpdaterProducer, ComputeBackendBucketIdParseFunc), + "google_compute_backend_bucket_iam_policy": ResourceIamPolicy(ComputeBackendBucketIamSchema, ComputeBackendBucketIamUpdaterProducer, ComputeBackendBucketIdParseFunc), "google_compute_backend_bucket_signed_url_key": resourceComputeBackendBucketSignedUrlKey(), 
"google_compute_backend_service": resourceComputeBackendService(), "google_compute_backend_service_iam_binding": ResourceIamBinding(ComputeBackendServiceIamSchema, ComputeBackendServiceIamUpdaterProducer, ComputeBackendServiceIdParseFunc), @@ -1115,6 +1119,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_compute_router_nat": resourceComputeRouterNat(), "google_compute_router_peer": resourceComputeRouterBgpPeer(), "google_compute_snapshot": resourceComputeSnapshot(), + "google_compute_snapshot_iam_binding": ResourceIamBinding(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), + "google_compute_snapshot_iam_member": ResourceIamMember(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), + "google_compute_snapshot_iam_policy": ResourceIamPolicy(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_managed_ssl_certificate": resourceComputeManagedSslCertificate(), "google_compute_region_ssl_certificate": resourceComputeRegionSslCertificate(), @@ -1165,7 +1172,17 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_data_loss_prevention_stored_info_type": resourceDataLossPreventionStoredInfoType(), "google_data_loss_prevention_deidentify_template": resourceDataLossPreventionDeidentifyTemplate(), "google_dataproc_autoscaling_policy": resourceDataprocAutoscalingPolicy(), + "google_dataproc_autoscaling_policy_iam_binding": ResourceIamBinding(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_autoscaling_policy_iam_member": ResourceIamMember(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_autoscaling_policy_iam_policy": 
ResourceIamPolicy(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), "google_dataproc_metastore_service": resourceDataprocMetastoreService(), + "google_dataproc_metastore_service_iam_binding": ResourceIamBinding(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_service_iam_member": ResourceIamMember(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_service_iam_policy": ResourceIamPolicy(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_federation": resourceDataprocMetastoreFederation(), + "google_dataproc_metastore_federation_iam_binding": ResourceIamBinding(DataprocMetastoreFederationIamSchema, DataprocMetastoreFederationIamUpdaterProducer, DataprocMetastoreFederationIdParseFunc), + "google_dataproc_metastore_federation_iam_member": ResourceIamMember(DataprocMetastoreFederationIamSchema, DataprocMetastoreFederationIamUpdaterProducer, DataprocMetastoreFederationIdParseFunc), + "google_dataproc_metastore_federation_iam_policy": ResourceIamPolicy(DataprocMetastoreFederationIamSchema, DataprocMetastoreFederationIamUpdaterProducer, DataprocMetastoreFederationIdParseFunc), "google_datastore_index": resourceDatastoreIndex(), "google_deployment_manager_deployment": resourceDeploymentManagerDeployment(), "google_dialogflow_agent": resourceDialogflowAgent(), @@ -1198,6 +1215,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_game_services_game_server_config": resourceGameServicesGameServerConfig(), "google_game_services_game_server_deployment_rollout": resourceGameServicesGameServerDeploymentRollout(), "google_gke_hub_membership": resourceGKEHubMembership(), + 
"google_gke_hub_membership_iam_binding": ResourceIamBinding(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), + "google_gke_hub_membership_iam_member": ResourceIamMember(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), + "google_gke_hub_membership_iam_policy": ResourceIamPolicy(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), "google_healthcare_dataset": resourceHealthcareDataset(), "google_healthcare_dicom_store": resourceHealthcareDicomStore(), "google_healthcare_fhir_store": resourceHealthcareFhirStore(), @@ -1357,7 +1377,6 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { // ####### START handwritten resources ########### "google_app_engine_application": resourceAppEngineApplication(), "google_bigquery_table": resourceBigQueryTable(), - "google_bigquery_reservation_assignment": resourceBigqueryReservationAssignment(), "google_bigtable_gc_policy": resourceBigtableGCPolicy(), "google_bigtable_instance": resourceBigtableInstance(), "google_bigtable_table": resourceBigtableTable(), @@ -1434,38 +1453,6 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_storage_transfer_job": resourceStorageTransferJob(), // ####### END handwritten resources ########### }, - map[string]*schema.Resource{ - // ####### START tpgtools resources ########### - "google_apikeys_key": resourceApikeysKey(), - "google_assured_workloads_workload": resourceAssuredWorkloadsWorkload(), - "google_cloudbuild_worker_pool": resourceCloudbuildWorkerPool(), - "google_clouddeploy_delivery_pipeline": resourceClouddeployDeliveryPipeline(), - "google_clouddeploy_target": resourceClouddeployTarget(), - "google_compute_firewall_policy_association": resourceComputeFirewallPolicyAssociation(), - "google_compute_firewall_policy": resourceComputeFirewallPolicy(), - "google_compute_firewall_policy_rule": 
resourceComputeFirewallPolicyRule(), - "google_container_aws_cluster": resourceContainerAwsCluster(), - "google_container_aws_node_pool": resourceContainerAwsNodePool(), - "google_container_azure_client": resourceContainerAzureClient(), - "google_container_azure_cluster": resourceContainerAzureCluster(), - "google_container_azure_node_pool": resourceContainerAzureNodePool(), - "google_dataplex_lake": resourceDataplexLake(), - "google_dataproc_workflow_template": resourceDataprocWorkflowTemplate(), - "google_eventarc_trigger": resourceEventarcTrigger(), - "google_firebaserules_release": resourceFirebaserulesRelease(), - "google_firebaserules_ruleset": resourceFirebaserulesRuleset(), - "google_gke_hub_feature": resourceGkeHubFeature(), - "google_gke_hub_feature_membership": resourceGkeHubFeatureMembership(), - "google_logging_log_view": resourceLoggingLogView(), - "google_monitoring_monitored_project": resourceMonitoringMonitoredProject(), - "google_network_connectivity_hub": resourceNetworkConnectivityHub(), - "google_network_connectivity_spoke": resourceNetworkConnectivitySpoke(), - "google_org_policy_policy": resourceOrgPolicyPolicy(), - "google_os_config_os_policy_assignment": resourceOsConfigOsPolicyAssignment(), - "google_privateca_certificate_template": resourcePrivatecaCertificateTemplate(), - "google_recaptcha_enterprise_key": resourceRecaptchaEnterpriseKey(), - // ####### END tpgtools resources ########### - }, map[string]*schema.Resource{ // ####### START non-generated IAM resources ########### "google_bigtable_instance_iam_binding": ResourceIamBinding(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), @@ -1530,6 +1517,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_service_account_iam_policy": ResourceIamPolicy(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), // ####### END non-generated IAM resources ########### }, + dclResources, ) } @@ -1709,20 +1697,8 
@@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.BigtableAdminBasePath = d.Get(BigtableAdminCustomEndpointEntryKey).(string) // dcl - config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) - config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) - config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) - config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) - config.DataplexBasePath = d.Get(DataplexEndpointEntryKey).(string) - config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) - config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) - config.GkeHubBasePath = d.Get(GkeHubFeatureCustomEndpointEntryKey).(string) - config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) - config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) - config.PrivatecaBasePath = d.Get(PrivatecaCertificateTemplateEndpointEntryKey).(string) config.ContainerAwsBasePath = d.Get(ContainerAwsCustomEndpointEntryKey).(string) config.ContainerAzureBasePath = d.Get(ContainerAzureCustomEndpointEntryKey).(string) - config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) stopCtx, ok := schema.StopContext(ctx) if !ok { @@ -1732,7 +1708,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr return nil, diag.FromErr(err) } - return &config, nil + return providerDCLConfigure(d, &config), nil } func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go index cfdd062e87..831544ef56 100644 --- 
a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_client_creation.go @@ -346,7 +346,7 @@ func NewDCLGkeHubClient(config *Config, userAgent, billingProject string, timeou dcl.WithHTTPClient(config.client), dcl.WithUserAgent(userAgent), dcl.WithLogger(dclLogger{}), - dcl.WithBasePath(config.GkeHubBasePath), + dcl.WithBasePath(config.GKEHubFeatureBasePath), } if timeout != 0 { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go index 04caae2dd6..032b13be16 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_endpoints.go @@ -67,33 +67,6 @@ var CloudResourceManagerEndpointEntry = &schema.Schema{ }, ""), } -var ComputeEndpointEntryKey = "compute_custom_endpoint" -var ComputeEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", - }, ""), -} - -var ContainerAwsEndpointEntryKey = "container_aws_custom_endpoint" -var ContainerAwsEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_AWS_CUSTOM_ENDPOINT", - }, ""), -} - -var ContainerAzureEndpointEntryKey = "container_azure_custom_endpoint" -var ContainerAzureEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_AZURE_CUSTOM_ENDPOINT", - }, ""), -} - var DataplexEndpointEntryKey = "dataplex_custom_endpoint" var DataplexEndpointEntry = &schema.Schema{ Type: schema.TypeString, @@ -121,21 
+94,12 @@ var FirebaserulesEndpointEntry = &schema.Schema{ }, ""), } -var LoggingEndpointEntryKey = "logging_custom_endpoint" -var LoggingEndpointEntry = &schema.Schema{ +var GKEHubFeatureEndpointEntryKey = "gkehub_feature_custom_endpoint" +var GKEHubFeatureEndpointEntry = &schema.Schema{ Type: schema.TypeString, Optional: true, DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_LOGGING_CUSTOM_ENDPOINT", - }, ""), -} - -var MonitoringEndpointEntryKey = "monitoring_custom_endpoint" -var MonitoringEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_MONITORING_CUSTOM_ENDPOINT", + "GOOGLE_GKEHUB_FEATURE_CUSTOM_ENDPOINT", }, ""), } @@ -157,24 +121,6 @@ var OrgPolicyEndpointEntry = &schema.Schema{ }, ""), } -var OSConfigEndpointEntryKey = "os_config_custom_endpoint" -var OSConfigEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", - }, ""), -} - -var PrivatecaEndpointEntryKey = "privateca_custom_endpoint" -var PrivatecaEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, ""), -} - var RecaptchaEnterpriseEndpointEntryKey = "recaptcha_enterprise_custom_endpoint" var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ Type: schema.TypeString, @@ -184,62 +130,49 @@ var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ }, ""), } -//Add new values to config.go.erb config object declaration -//ApikeysBasePath string -//AssuredWorkloadsBasePath string -//CloudBuildWorkerPoolBasePath string -//ClouddeployBasePath string -//CloudResourceManagerBasePath string -//ComputeBasePath string -//ContainerAwsBasePath string -//ContainerAzureBasePath string -//DataplexBasePath string -//EventarcBasePath string -//FirebaserulesBasePath string -//LoggingBasePath string 
-//MonitoringBasePath string -//NetworkConnectivityBasePath string -//OrgPolicyBasePath string -//OSConfigBasePath string -//PrivatecaBasePath string -//RecaptchaEnterpriseBasePath string - -//Add new values to provider.go.erb schema initialization -// ApikeysEndpointEntryKey: ApikeysEndpointEntry, -// AssuredWorkloadsEndpointEntryKey: AssuredWorkloadsEndpointEntry, -// CloudBuildWorkerPoolEndpointEntryKey: CloudBuildWorkerPoolEndpointEntry, -// ClouddeployEndpointEntryKey: ClouddeployEndpointEntry, -// CloudResourceManagerEndpointEntryKey: CloudResourceManagerEndpointEntry, -// ComputeEndpointEntryKey: ComputeEndpointEntry, -// ContainerAwsEndpointEntryKey: ContainerAwsEndpointEntry, -// ContainerAzureEndpointEntryKey: ContainerAzureEndpointEntry, -// DataplexEndpointEntryKey: DataplexEndpointEntry, -// EventarcEndpointEntryKey: EventarcEndpointEntry, -// FirebaserulesEndpointEntryKey: FirebaserulesEndpointEntry, -// LoggingEndpointEntryKey: LoggingEndpointEntry, -// MonitoringEndpointEntryKey: MonitoringEndpointEntry, -// NetworkConnectivityEndpointEntryKey: NetworkConnectivityEndpointEntry, -// OrgPolicyEndpointEntryKey: OrgPolicyEndpointEntry, -// OSConfigEndpointEntryKey: OSConfigEndpointEntry, -// PrivatecaEndpointEntryKey: PrivatecaEndpointEntry, -// RecaptchaEnterpriseEndpointEntryKey: RecaptchaEnterpriseEndpointEntry, - -//Add new values to provider.go.erb - provider block read -// config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) -// config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) -// config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) -// config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) -// config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) -// config.ComputeBasePath = d.Get(ComputeEndpointEntryKey).(string) -// config.ContainerAwsBasePath = d.Get(ContainerAwsEndpointEntryKey).(string) -// 
config.ContainerAzureBasePath = d.Get(ContainerAzureEndpointEntryKey).(string) -// config.DataplexBasePath = d.Get(DataplexEndpointEntryKey).(string) -// config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) -// config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) -// config.LoggingBasePath = d.Get(LoggingEndpointEntryKey).(string) -// config.MonitoringBasePath = d.Get(MonitoringEndpointEntryKey).(string) -// config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) -// config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) -// config.OSConfigBasePath = d.Get(OSConfigEndpointEntryKey).(string) -// config.PrivatecaBasePath = d.Get(PrivatecaEndpointEntryKey).(string) -// config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) +type DCLConfig struct { + ApikeysBasePath string + AssuredWorkloadsBasePath string + CloudBuildWorkerPoolBasePath string + ClouddeployBasePath string + CloudResourceManagerBasePath string + DataplexBasePath string + EventarcBasePath string + FirebaserulesBasePath string + GKEHubFeatureBasePath string + NetworkConnectivityBasePath string + OrgPolicyBasePath string + RecaptchaEnterpriseBasePath string +} + +func configureDCLProvider(provider *schema.Provider) { + provider.Schema[ApikeysEndpointEntryKey] = ApikeysEndpointEntry + provider.Schema[AssuredWorkloadsEndpointEntryKey] = AssuredWorkloadsEndpointEntry + provider.Schema[CloudBuildWorkerPoolEndpointEntryKey] = CloudBuildWorkerPoolEndpointEntry + provider.Schema[ClouddeployEndpointEntryKey] = ClouddeployEndpointEntry + provider.Schema[CloudResourceManagerEndpointEntryKey] = CloudResourceManagerEndpointEntry + provider.Schema[DataplexEndpointEntryKey] = DataplexEndpointEntry + provider.Schema[EventarcEndpointEntryKey] = EventarcEndpointEntry + provider.Schema[FirebaserulesEndpointEntryKey] = FirebaserulesEndpointEntry + provider.Schema[GKEHubFeatureEndpointEntryKey] = 
GKEHubFeatureEndpointEntry + provider.Schema[NetworkConnectivityEndpointEntryKey] = NetworkConnectivityEndpointEntry + provider.Schema[OrgPolicyEndpointEntryKey] = OrgPolicyEndpointEntry + provider.Schema[RecaptchaEnterpriseEndpointEntryKey] = RecaptchaEnterpriseEndpointEntry +} + +func providerDCLConfigure(d *schema.ResourceData, config *Config) interface{} { + config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) + config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) + config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) + config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) + config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) + config.DataplexBasePath = d.Get(DataplexEndpointEntryKey).(string) + config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) + config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) + config.GKEHubFeatureBasePath = d.Get(GKEHubFeatureEndpointEntryKey).(string) + config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) + config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) + config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) + config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) + return config +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_resources.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_resources.go new file mode 100644 index 0000000000..d5a1ce78b3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_dcl_resources.go @@ -0,0 +1,54 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// 
---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var dclResources = map[string]*schema.Resource{ + "google_apikeys_key": resourceApikeysKey(), + "google_assured_workloads_workload": resourceAssuredWorkloadsWorkload(), + "google_bigquery_reservation_assignment": resourceBigqueryReservationAssignment(), + "google_cloudbuild_worker_pool": resourceCloudbuildWorkerPool(), + "google_clouddeploy_delivery_pipeline": resourceClouddeployDeliveryPipeline(), + "google_clouddeploy_target": resourceClouddeployTarget(), + "google_compute_firewall_policy": resourceComputeFirewallPolicy(), + "google_compute_firewall_policy_association": resourceComputeFirewallPolicyAssociation(), + "google_compute_firewall_policy_rule": resourceComputeFirewallPolicyRule(), + "google_container_aws_cluster": resourceContainerAwsCluster(), + "google_container_aws_node_pool": resourceContainerAwsNodePool(), + "google_container_azure_client": resourceContainerAzureClient(), + "google_container_azure_cluster": resourceContainerAzureCluster(), + "google_container_azure_node_pool": resourceContainerAzureNodePool(), + "google_dataplex_asset": resourceDataplexAsset(), + "google_dataplex_lake": resourceDataplexLake(), + "google_dataplex_zone": resourceDataplexZone(), + "google_dataproc_workflow_template": 
resourceDataprocWorkflowTemplate(), + "google_eventarc_trigger": resourceEventarcTrigger(), + "google_firebaserules_release": resourceFirebaserulesRelease(), + "google_firebaserules_ruleset": resourceFirebaserulesRuleset(), + "google_gke_hub_feature": resourceGkeHubFeature(), + "google_gke_hub_feature_membership": resourceGkeHubFeatureMembership(), + "google_logging_log_view": resourceLoggingLogView(), + "google_monitoring_monitored_project": resourceMonitoringMonitoredProject(), + "google_network_connectivity_hub": resourceNetworkConnectivityHub(), + "google_network_connectivity_spoke": resourceNetworkConnectivitySpoke(), + "google_org_policy_policy": resourceOrgPolicyPolicy(), + "google_os_config_os_policy_assignment": resourceOsConfigOsPolicyAssignment(), + "google_privateca_certificate_template": resourcePrivatecaCertificateTemplate(), + "google_recaptcha_enterprise_key": resourceRecaptchaEnterpriseKey(), +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go index e69331f529..19c015751e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/provider_handwritten_endpoint.go @@ -128,17 +128,6 @@ var BigtableAdminCustomEndpointEntry = &schema.Schema{ }, DefaultBasePaths[BigtableAdminBasePathKey]), } -// GkeHubFeature uses a different base path "v1beta" than GkeHubMembership "v1beta1" -var GkeHubFeatureCustomEndpointEntryKey = "gkehub_feature_custom_endpoint" -var GkeHubFeatureCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GKEHUB_FEATURE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GkeHubFeatureBasePathKey]), -} - var 
PrivatecaCertificateTemplateEndpointEntryKey = "privateca_custom_endpoint" var PrivatecaCertificateTemplateCustomEndpointEntry = &schema.Schema{ Type: schema.TypeString, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go index 9b758d2761..19751500e3 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/redis_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering.go new file mode 100644 index 0000000000..08fb57c2cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_active_directory_peering.go @@ -0,0 +1,374 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceActiveDirectoryPeering() *schema.Resource { + return &schema.Resource{ + Create: resourceActiveDirectoryPeeringCreate, + Read: resourceActiveDirectoryPeeringRead, + Update: resourceActiveDirectoryPeeringUpdate, + Delete: resourceActiveDirectoryPeeringDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "authorized_network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full names of the Google Compute Engine networks to which the instance is connected. Caller needs to make sure that CIDR subnets do not overlap between networks, else peering creation will fail.`, + }, + "domain_resource": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Full domain resource path for the Managed AD Domain involved in peering. 
The resource path should be in the form projects/{projectId}/locations/global/domains/{domainName}`, + }, + "peering_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: ``, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels that can contain user-provided metadata`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: `The current state of this Peering.`, + }, + "status_message": { + Type: schema.TypeString, + Optional: true, + Description: `Additional information about the current status of this peering, if available.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Unique name of the peering in this scope including projects and location using the form: projects/{projectId}/locations/global/peerings/{peeringId}.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceActiveDirectoryPeeringCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandActiveDirectoryPeeringLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + authorizedNetworkProp, err := expandActiveDirectoryPeeringAuthorizedNetwork(d.Get("authorized_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { + obj["authorizedNetwork"] = authorizedNetworkProp + } + domainResourceProp, err := 
expandActiveDirectoryPeeringDomainResource(d.Get("domain_resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("domain_resource"); !isEmptyValue(reflect.ValueOf(domainResourceProp)) && (ok || !reflect.DeepEqual(v, domainResourceProp)) { + obj["domainResource"] = domainResourceProp + } + statusMessageProp, err := expandActiveDirectoryPeeringStatusMessage(d.Get("status_message"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status_message"); !isEmptyValue(reflect.ValueOf(statusMessageProp)) && (ok || !reflect.DeepEqual(v, statusMessageProp)) { + obj["statusMessage"] = statusMessageProp + } + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/peerings?peeringId={{peering_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Peering: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Peering: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{peering_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = activeDirectoryOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Peering", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create 
+ d.SetId("") + return fmt.Errorf("Error waiting to create Peering: %s", err) + } + + if err := d.Set("name", flattenActiveDirectoryPeeringName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{peering_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Peering %q: %#v", d.Id(), res) + + return resourceActiveDirectoryPeeringRead(d, meta) +} + +func resourceActiveDirectoryPeeringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryPeering %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + + if err := d.Set("name", flattenActiveDirectoryPeeringName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + if err := d.Set("labels", flattenActiveDirectoryPeeringLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + if err := d.Set("authorized_network", flattenActiveDirectoryPeeringAuthorizedNetwork(res["authorizedNetwork"], d, 
config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + if err := d.Set("domain_resource", flattenActiveDirectoryPeeringDomainResource(res["domainResource"], d, config)); err != nil { + return fmt.Errorf("Error reading Peering: %s", err) + } + + return nil +} + +func resourceActiveDirectoryPeeringUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandActiveDirectoryPeeringLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + statusMessageProp, err := expandActiveDirectoryPeeringStatusMessage(d.Get("status_message"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status_message"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusMessageProp)) { + obj["statusMessage"] = statusMessageProp + } + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Peering %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Peering %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Peering %q: %#v", d.Id(), res) + } + + err = 
activeDirectoryOperationWaitTime( + config, res, project, "Updating Peering", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceActiveDirectoryPeeringRead(d, meta) +} + +func resourceActiveDirectoryPeeringDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Peering: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/peerings/{{peering_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Peering %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Peering") + } + + err = activeDirectoryOperationWaitTime( + config, res, project, "Deleting Peering", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Peering %q: %#v", d.Id(), res) + return nil +} + +func flattenActiveDirectoryPeeringName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenActiveDirectoryPeeringLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenActiveDirectoryPeeringAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenActiveDirectoryPeeringDomainResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+ return v +} + +func expandActiveDirectoryPeeringLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandActiveDirectoryPeeringAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandActiveDirectoryPeeringDomainResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandActiveDirectoryPeeringStatusMessage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go index a395421e3b..c7966e0433 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apigee_instance.go @@ -210,7 +210,7 @@ func resourceApigeeInstanceCreate(d *schema.ResourceData, meta interface{}) erro billingProject = bp } - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isApigeeRetryableError) if err != nil { return fmt.Errorf("Error creating Instance: %s", err) } @@ -269,7 +269,7 @@ func resourceApigeeInstanceRead(d *schema.ResourceData, meta interface{}) error billingProject = bp } - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isApigeeRetryableError) 
if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("ApigeeInstance %q", d.Id())) } @@ -337,7 +337,7 @@ func resourceApigeeInstanceDelete(d *schema.ResourceData, meta interface{}) erro billingProject = bp } - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isApigeeRetryableError) if err != nil { return handleNotFoundError(err, d, "Instance") } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go index 6e756c074a..e847d3bbf0 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_apikeys_key.go @@ -49,7 +49,7 @@ func resourceApikeysKey() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: [a-z]([a-z0-9-]{0,61}[a-z0-9])?.", + Description: "The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", }, "display_name": { @@ -81,6 +81,12 @@ func resourceApikeysKey() *schema.Resource { Sensitive: true, Description: "Output only. An encrypted and signed value held by this key. 
This field can be accessed only through the `GetKeyString` method.", }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique id in UUID4 format.", + }, }, } } @@ -233,12 +239,12 @@ func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/keys/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -255,7 +261,7 @@ func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { } else { client.Config.BasePath = bp } - res, err := client.ApplyKey(context.Background(), obj, createDirective...) + res, err := client.ApplyKey(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -321,6 +327,9 @@ func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { if err = d.Set("key_string", res.KeyString); err != nil { return fmt.Errorf("error setting key_string in state: %s", err) } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } return nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go index 0e95583e91..e865bb1e9a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_assured_workloads_workload.go @@ -207,12 +207,12 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), } - id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -229,7 +229,7 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa } else { client.Config.BasePath = bp } - res, err := client.ApplyWorkload(context.Background(), obj, createDirective...) + res, err := client.ApplyWorkload(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -242,10 +242,11 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + // ID has a server-generated value, set again after creation. + + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go index 64cc4dab57..9f51dec6b7 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_data_transfer_config.go @@ -67,7 +67,6 @@ func resourceBigqueryDataTransferConfig() *schema.Resource { "display_name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `The user specified display name for the transfer config.`, }, "params": { @@ -75,7 +74,9 @@ func resourceBigqueryDataTransferConfig() *schema.Resource { Required: true, Description: `Parameters specific to each data source. For more information see the bq tab in the 'Setting up a data transfer' section for each data source. 
For example the parameters for Cloud Storage transfers are listed here: -https://cloud.google.com/bigquery-transfer/docs/cloud-storage-transfer#bq`, +https://cloud.google.com/bigquery-transfer/docs/cloud-storage-transfer#bq + +**NOTE** : If you are attempting to update a parameter that cannot be updated (due to api limitations) [please force recreation of the resource](https://www.terraform.io/cli/state/taint#forcing-re-creation-of-resources).`, Elem: &schema.Schema{Type: schema.TypeString}, }, "data_refresh_window_days": { @@ -458,6 +459,12 @@ func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta inter billingProject = project obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) if err != nil { return err @@ -520,6 +527,10 @@ func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta inter log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) updateMask := []string{} + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + if d.HasChange("destination_dataset_id") { updateMask = append(updateMask, "destinationDatasetId") } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go index 3d9f0e0697..df9c610c6e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go +++ 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_dataset_access.go @@ -53,19 +53,19 @@ func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *s } if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } if memberInState := d.Get("group_by_email").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } if memberInState := d.Get("domain").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } if memberInState := d.Get("special_group").(string); memberInState != "" { - return memberInState == strippedIamMember + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) } } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go index ecc2be77b1..2a58dbe7aa 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_job.go @@ -664,6 +664,7 @@ The BigQuery Service Account associated with your project requires access to thi }, "destination_table": { Type: schema.TypeList, + Computed: true, Optional: true, ForceNew: true, Description: `Describes the table where the query results should be stored. 
diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go index 87828ad307..110d8758e2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigquery_reservation_assignment.go @@ -118,7 +118,7 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -135,7 +135,7 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in } else { client.Config.BasePath = bp } - res, err := client.ApplyAssignment(context.Background(), obj, createDirective...) + res, err := client.ApplyAssignment(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -145,7 +145,12 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in return fmt.Errorf("Error creating Assignment: %s", err) } - id, err = obj.ID() + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. 
+ + id, err = res.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go index f6440c7435..81fec4d44d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_instance.go @@ -521,6 +521,7 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch for i, e := range orderedClusters { if e == nil { orderedClusters[i] = elem + break } } } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go index 8b424d7c1f..d3acfbd5ec 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_bigtable_table.go @@ -276,7 +276,7 @@ func flattenColumnFamily(families []string) []map[string]interface{} { return result } -//TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 +// TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go index f3bc607aac..612a4c4aad 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go +++ 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_billing_budget.go @@ -26,6 +26,17 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) +// Check to see if a specified value in the config exists and suppress diffs if so. Otherwise run emptyOrDefaultStringSuppress. + +func checkValAndDefaultStringSuppress(defaultVal string, checkVal string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + if _, ok := d.GetOkExists(checkVal); ok { + return false + } + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) + } +} + func resourceBillingBudget() *schema.Resource { return &schema.Resource{ Create: resourceBillingBudgetCreate, @@ -114,31 +125,6 @@ is "USD", then 1 unit is one US dollar.`, ForceNew: true, Description: `ID of the billing account to set a budget on.`, }, - "threshold_rules": { - Type: schema.TypeList, - Required: true, - Description: `Rules that trigger alerts (notifications of thresholds being -crossed) when spend exceeds the specified percentages of the -budget.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "threshold_percent": { - Type: schema.TypeFloat, - Required: true, - Description: `Send an alert when this threshold is exceeded. This is a -1.0-based percentage, so 0.5 = 50%. Must be >= 0.`, - }, - "spend_basis": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}), - Description: `The type of basis used to determine if spend has passed -the threshold. Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, - Default: "CURRENT_SPEND", - }, - }, - }, - }, "all_updates_rule": { Type: schema.TypeList, Optional: true, @@ -164,6 +150,7 @@ Account Users IAM roles for the target account.`, channel in the form projects/{project_id}/notificationChannels/{channel_id}. 
A maximum of 5 channels are allowed.`, + MaxItems: 5, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -198,20 +185,29 @@ spend against the budget.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "calendar_period": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"MONTH", "QUARTER", "YEAR", "CALENDAR_PERIOD_UNSPECIFIED", ""}), + DiffSuppressFunc: checkValAndDefaultStringSuppress("MONTH", "budget_filter.0.custom_period.0.start_date"), + Description: `A CalendarPeriod represents the abstract concept of a recurring time period that has a +canonical start. Grammatically, "the start of the current CalendarPeriod". +All calendar times begin at 12 AM US and Canadian Pacific Time (UTC-8). + +Exactly one of 'calendar_period', 'custom_period' must be provided. Possible values: ["MONTH", "QUARTER", "YEAR", "CALENDAR_PERIOD_UNSPECIFIED"]`, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, + }, "credit_types": { Type: schema.TypeList, Computed: true, Optional: true, - Description: `A set of subaccounts of the form billingAccounts/{account_id}, -specifying that usage from only this set of subaccounts should -be included in the budget. If a subaccount is set to the name of -the parent account, usage from the parent account will be included. -If the field is omitted, the report will include usage from the parent -account and all subaccounts, if they exist.`, + Description: `Optional. If creditTypesTreatment is INCLUDE_SPECIFIED_CREDITS, +this is a list of credit types to be subtracted from gross cost to determine the spend for threshold calculations. See a list of acceptable credit type values. 
+If creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be empty.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "credit_types_treatment": { Type: schema.TypeString, @@ -220,7 +216,78 @@ account and all subaccounts, if they exist.`, Description: `Specifies how credits should be treated when determining spend for threshold calculations. Default value: "INCLUDE_ALL_CREDITS" Possible values: ["INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS"]`, Default: "INCLUDE_ALL_CREDITS", - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, + }, + "custom_period": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies to track usage from any start date (required) to any end date (optional). +This time period is static, it does not recur. + +Exactly one of 'calendar_period', 'custom_period' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_date": { + Type: schema.TypeList, + Required: true, + Description: `A start date is required. 
The start date must be after January 1, 2017.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month.`, + }, + "month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 12), + Description: `Month of a year. Must be from 1 to 12.`, + }, + "year": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999.`, + }, + }, + }, + }, + "end_date": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. The end date of the time period. Budgets with elapsed end date won't be processed. +If unset, specifies to track all usage incurred since the startDate.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month.`, + }, + "month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 12), + Description: `Month of a year. Must be from 1 to 12.`, + }, + "year": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999.`, + }, + }, + }, + }, + }, + }, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "labels": { Type: schema.TypeMap, @@ -229,7 +296,7 @@ for threshold calculations. 
Default value: "INCLUDE_ALL_CREDITS" Possible values Description: `A single label and value pair specifying that usage from only this set of labeled resources should be included in the budget.`, Elem: &schema.Schema{Type: schema.TypeString}, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "projects": { Type: schema.TypeSet, @@ -243,7 +310,7 @@ the usage occurred on.`, Type: schema.TypeString, }, Set: schema.HashString, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "services": { Type: schema.TypeList, @@ -258,7 +325,7 @@ https://cloud.google.com/billing/v1/how-tos/catalog-api.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, "subaccounts": { Type: schema.TypeList, @@ -273,7 +340,7 @@ account and all subaccounts, if they exist.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - 
AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, + AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels", "budget_filter.0.calendar_period", "budget_filter.0.custom_period"}, }, }, }, @@ -283,6 +350,31 @@ account and all subaccounts, if they exist.`, Optional: true, Description: `User data for display name in UI. Must be <= 60 chars.`, }, + "threshold_rules": { + Type: schema.TypeList, + Optional: true, + Description: `Rules that trigger alerts (notifications of thresholds being +crossed) when spend exceeds the specified percentages of the +budget.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threshold_percent": { + Type: schema.TypeFloat, + Required: true, + Description: `Send an alert when this threshold is exceeded. This is a +1.0-based percentage, so 0.5 = 50%. Must be >= 0.`, + }, + "spend_basis": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}), + Description: `The type of basis used to determine if spend has passed +the threshold. 
Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, + Default: "CURRENT_SPEND", + }, + }, + }, + }, "name": { Type: schema.TypeString, Computed: true, @@ -468,7 +560,10 @@ func resourceBillingBudgetUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("budget_filter") { updateMask = append(updateMask, "budgetFilter.projects", - "budgetFilter.labels") + "budgetFilter.labels", + "budgetFilter.calendarPeriod", + "budgetFilter.customPeriod", + "budgetFilter.services") } if d.HasChange("amount") { @@ -593,6 +688,10 @@ func flattenBillingBudgetBudgetFilter(v interface{}, d *schema.ResourceData, con flattenBillingBudgetBudgetFilterSubaccounts(original["subaccounts"], d, config) transformed["labels"] = flattenBillingBudgetBudgetFilterLabels(original["labels"], d, config) + transformed["calendar_period"] = + flattenBillingBudgetBudgetFilterCalendarPeriod(original["calendarPeriod"], d, config) + transformed["custom_period"] = + flattenBillingBudgetBudgetFilterCustomPeriod(original["customPeriod"], d, config) return []interface{}{transformed} } func flattenBillingBudgetBudgetFilterProjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -643,6 +742,161 @@ func flattenBillingBudgetBudgetFilterLabels(v interface{}, d *schema.ResourceDat return transformed } +func flattenBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenBillingBudgetBudgetFilterCustomPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_date"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDate(original["startDate"], d, config) + transformed["end_date"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDate(original["endDate"], d, config) + 
return []interface{}{transformed} +} +func flattenBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(original["year"], d, config) + transformed["month"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(original["month"], d, config) + transformed["day"] = + flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented 
as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(original["year"], d, config) + transformed["month"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(original["month"], d, config) + transformed["day"] = + flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, 
err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenBillingBudgetAmount(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -825,6 +1079,20 @@ func expandBillingBudgetBudgetFilter(v interface{}, d TerraformResourceData, con transformed["labels"] = transformedLabels } + transformedCalendarPeriod, err := expandBillingBudgetBudgetFilterCalendarPeriod(original["calendar_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCalendarPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["calendarPeriod"] = transformedCalendarPeriod + } + + transformedCustomPeriod, err := expandBillingBudgetBudgetFilterCustomPeriod(original["custom_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["customPeriod"] = transformedCustomPeriod + } + return transformed, nil } @@ -860,6 +1128,126 @@ func expandBillingBudgetBudgetFilterLabels(v interface{}, d TerraformResourceDat return m, nil } +func expandBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartDate, err := expandBillingBudgetBudgetFilterCustomPeriodStartDate(original["start_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartDate); 
val.IsValid() && !isEmptyValue(val) { + transformed["startDate"] = transformedStartDate + } + + transformedEndDate, err := expandBillingBudgetBudgetFilterCustomPeriodEndDate(original["end_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndDate); val.IsValid() && !isEmptyValue(val) { + transformed["endDate"] = transformedEndDate + } + + return transformed, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func 
expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandBillingBudgetAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == 
nil { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go index 515ac34931..d4d8ecfee9 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate.go @@ -65,7 +65,7 @@ and all following characters must be a dash, underscore, letter or digit.`, "labels": { Type: schema.TypeMap, Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, + Description: `Set of label tags associated with the Certificate resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "managed": { @@ -97,10 +97,63 @@ Wildcard domains are only supported with DNS challenge resolution`, Type: schema.TypeString, }, }, + "authorization_attempt_info": { + Type: schema.TypeList, + Computed: true, + Description: `Detailed state of the latest authorization attempt for each domain +specified for this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation for reaching the state. Provided to help +address the configuration issues. +Not guaranteed to be stable. 
For programmatic access use 'failure_reason' field.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `Domain name of the authorization attempt.`, + }, + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for failure of the authorization attempt for the domain.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the domain for managed certificate issuance.`, + }, + }, + }, + }, + "provisioning_issue": { + Type: schema.TypeList, + Computed: true, + Description: `Information about issues with provisioning this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation about the issue. Provided to help address +the configuration issues. +Not guaranteed to be stable. For programmatic access use 'reason' field.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for provisioning failures.`, + }, + }, + }, + }, "state": { Type: schema.TypeString, Computed: true, - Description: `State of the managed certificate resource.`, + Description: `A state of this Managed Certificate.`, }, }, }, @@ -110,16 +163,15 @@ Wildcard domains are only supported with DNS challenge resolution`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DEFAULT", "EDGE_CACHE", ""}), DiffSuppressFunc: certManagerDefaultScopeDiffSuppress, Description: `The scope of the certificate. -Certificates with default scope are served from core Google data centers. +DEFAULT: Certificates with default scope are served from core Google data centers. If unsure, choose this option. -Certificates with scope EDGE_CACHE are special-purposed certificates, +EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from non-core Google data centers. 
-Currently allowed only for managed certificates. Default value: "DEFAULT" Possible values: ["DEFAULT", "EDGE_CACHE"]`, +Currently allowed only for managed certificates.`, Default: "DEFAULT", }, "self_managed": { @@ -483,24 +535,88 @@ func flattenCertificateManagerCertificateManaged(v interface{}, d *schema.Resour return nil } transformed := make(map[string]interface{}) - transformed["state"] = - flattenCertificateManagerCertificateManagedState(original["state"], d, config) transformed["domains"] = flattenCertificateManagerCertificateManagedDomains(original["domains"], d, config) transformed["dns_authorizations"] = flattenCertificateManagerCertificateManagedDnsAuthorizations(original["dnsAuthorizations"], d, config) + transformed["state"] = + flattenCertificateManagerCertificateManagedState(original["state"], d, config) + transformed["provisioning_issue"] = + flattenCertificateManagerCertificateManagedProvisioningIssue(original["provisioningIssue"], d, config) + transformed["authorization_attempt_info"] = + flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorizationAttemptInfo"], d, config) return []interface{}{transformed} } +func flattenCertificateManagerCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return d.Get("managed.0.dns_authorizations") +} + func flattenCertificateManagerCertificateManagedState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } -func flattenCertificateManagerCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 
{ + return nil + } + transformed := make(map[string]interface{}) + transformed["reason"] = + flattenCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) + transformed["details"] = + flattenCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } -func flattenCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("managed.0.dns_authorizations") +func flattenCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "domain": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config), + "state": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config), + "failure_reason": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failureReason"], d, config), + "details": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v } func expandCertificateManagerCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { @@ -565,6 +681,20 @@ func expandCertificateManagerCertificateManaged(v interface{}, d TerraformResour original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformedDomains, err := expandCertificateManagerCertificateManagedDomains(original["domains"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { + transformed["domains"] = transformedDomains + } + + transformedDnsAuthorizations, err := expandCertificateManagerCertificateManagedDnsAuthorizations(original["dns_authorizations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDnsAuthorizations); val.IsValid() && !isEmptyValue(val) { + transformed["dnsAuthorizations"] = transformedDnsAuthorizations + } + transformedState, err := expandCertificateManagerCertificateManagedState(original["state"], d, config) if err != nil { return nil, err @@ -572,31 +702,124 @@ func expandCertificateManagerCertificateManaged(v interface{}, d TerraformResour transformed["state"] = transformedState } - transformedDomains, err := expandCertificateManagerCertificateManagedDomains(original["domains"], d, config) + transformedProvisioningIssue, err := 
expandCertificateManagerCertificateManagedProvisioningIssue(original["provisioning_issue"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { - transformed["domains"] = transformedDomains + } else if val := reflect.ValueOf(transformedProvisioningIssue); val.IsValid() && !isEmptyValue(val) { + transformed["provisioningIssue"] = transformedProvisioningIssue } - transformedDnsAuthorizations, err := expandCertificateManagerCertificateManagedDnsAuthorizations(original["dns_authorizations"], d, config) + transformedAuthorizationAttemptInfo, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorization_attempt_info"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDnsAuthorizations); val.IsValid() && !isEmptyValue(val) { - transformed["dnsAuthorizations"] = transformedDnsAuthorizations + } else if val := reflect.ValueOf(transformedAuthorizationAttemptInfo); val.IsValid() && !isEmptyValue(val) { + transformed["authorizationAttemptInfo"] = transformedAuthorizationAttemptInfo } return transformed, nil } +func expandCertificateManagerCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandCertificateManagerCertificateManagedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandCertificateManagerCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, 
nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedReason, err := expandCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReason); val.IsValid() && !isEmptyValue(val) { + transformed["reason"] = transformedReason + } + + transformedDetails, err := expandCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !isEmptyValue(val) { + transformed["details"] = transformedDetails + } + + return transformed, nil +} + +func expandCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomain, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { + transformed["domain"] = transformedDomain + } + + transformedState, err := 
expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedFailureReason, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failure_reason"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureReason); val.IsValid() && !isEmptyValue(val) { + transformed["failureReason"] = transformedFailureReason + } + + transformedDetails, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !isEmptyValue(val) { + transformed["details"] = transformedDetails + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map.go new file mode 
100644 index 0000000000..cb8d7098d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map.go @@ -0,0 +1,478 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceCertificateManagerCertificateMap() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateMapCreate, + Read: resourceCertificateManagerCertificateMapRead, + Update: resourceCertificateManagerCertificateMapUpdate, + Delete: resourceCertificateManagerCertificateMapDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateMapImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the Certificate Map. 
Certificate Map names must be unique +globally and match the pattern 'projects/*/locations/*/certificateMaps/*'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Set of labels associated with a Certificate Map resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "gclb_targets": { + Type: schema.TypeList, + Computed: true, + Description: `A list of target proxies that use this Certificate Map`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_configs": { + Type: schema.TypeList, + Optional: true, + Description: `An IP configuration where this Certificate Map is serving`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: `An external IP address`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `A list of ports`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "target_https_proxy": { + Type: schema.TypeString, + Optional: true, + Description: `Proxy name must be in the format projects/*/locations/*/targetHttpsProxies/*. +This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or +'targetSslProxy' may be set.`, + }, + "target_ssl_proxy": { + Type: schema.TypeString, + Optional: true, + Description: `Proxy name must be in the format projects/*/locations/*/targetSslProxies/*. 
+This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or +'targetSslProxy' may be set.`, + }, + }, + }, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateMapCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps?certificateMapId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateMap: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating CertificateMap: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = certificateManagerOperationWaitTime( + config, res, project, "Creating CertificateMap", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateMap: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateMap %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateMapRead(d, meta) +} + +func resourceCertificateManagerCertificateMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMap %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + 
+ if err := d.Set("description", flattenCertificateManagerCertificateMapDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateMapCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateMapUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateMapLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("gclb_targets", flattenCertificateManagerCertificateMapGclbTargets(res["gclbTargets"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateMapUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CertificateMap %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating CertificateMap %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CertificateMap %q: %#v", d.Id(), res) + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Updating CertificateMap", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateMapRead(d, meta) +} + +func resourceCertificateManagerCertificateMapDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != 
nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateMap %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "CertificateMap") + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateMap", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CertificateMap %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateMapImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateMapDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCertificateManagerCertificateMapGclbTargets(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_configs": flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(original["ipConfigs"], d, config), + "target_https_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(original["targetHttpsProxy"], d, config), + "target_ssl_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(original["targetSslProxy"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_address": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(original["ipAddress"], d, config), + "ports": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(original["ports"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandCertificateManagerCertificateMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry.go new file mode 100644 index 0000000000..99f16ffcaf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_certificate_map_entry.go @@ -0,0 +1,523 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceCertificateManagerCertificateMapEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateMapEntryCreate, + Read: resourceCertificateManagerCertificateMapEntryRead, + Update: resourceCertificateManagerCertificateMapEntryUpdate, + Delete: resourceCertificateManagerCertificateMapEntryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateMapEntryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "certificates": { + Type: schema.TypeList, + Required: true, + DiffSuppressFunc: projectNumberDiffSuppress, + Description: `A set of Certificates defines for the given hostname. +There can be defined up to fifteen certificates in each Certificate Map Entry. +Each certificate must match pattern projects/*/locations/*/certificates/*.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "map": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `A map entry that is inputted into the cetrificate map`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the Certificate Map Entry. 
Certificate Map Entry +names must be unique globally and match pattern +'projects/*/locations/*/certificateMaps/*/certificateMapEntries/*'`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "hostname": { + Type: schema.TypeString, + Optional: true, + Description: `A Hostname (FQDN, e.g. example.com) or a wildcard hostname expression (*.example.com) +for a set of hostnames with common suffix. Used as Server Name Indication (SNI) for +selecting a proper certificate.`, + ExactlyOneOf: []string{"hostname", "matcher"}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Set of labels associated with a Certificate Map Entry. +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "matcher": { + Type: schema.TypeString, + Optional: true, + Description: `A predefined matcher for particular cases, other than SNI selection`, + ExactlyOneOf: []string{"hostname", "matcher"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `A serving state of this Certificate Map Entry.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateMapEntryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificates"); !isEmptyValue(reflect.ValueOf(certificatesProp)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { + obj["certificates"] = certificatesProp + } + hostnameProp, err := expandCertificateManagerCertificateMapEntryHostname(d.Get("hostname"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostname"); !isEmptyValue(reflect.ValueOf(hostnameProp)) && (ok || !reflect.DeepEqual(v, hostnameProp)) { + obj["hostname"] = hostnameProp + } + matcherProp, err := expandCertificateManagerCertificateMapEntryMatcher(d.Get("matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("matcher"); 
!isEmptyValue(reflect.ValueOf(matcherProp)) && (ok || !reflect.DeepEqual(v, matcherProp)) { + obj["matcher"] = matcherProp + } + nameProp, err := expandCertificateManagerCertificateMapEntryName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries?certificateMapEntryId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateMapEntry: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating CertificateMapEntry: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = certificateManagerOperationWaitTime( + config, res, project, "Creating CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateMapEntry: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateMapEntry %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateMapEntryRead(d, meta) +} + +func 
resourceCertificateManagerCertificateMapEntryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMapEntry %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + + if err := d.Set("description", flattenCertificateManagerCertificateMapEntryDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateMapEntryCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateMapEntryUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateMapEntryLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("certificates", 
flattenCertificateManagerCertificateMapEntryCertificates(res["certificates"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("state", flattenCertificateManagerCertificateMapEntryState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("hostname", flattenCertificateManagerCertificateMapEntryHostname(res["hostname"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("matcher", flattenCertificateManagerCertificateMapEntryMatcher(res["matcher"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("name", flattenCertificateManagerCertificateMapEntryName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateMapEntryUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { + obj["labels"] = labelsProp + } + certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { + obj["certificates"] = certificatesProp + } + hostnameProp, err := expandCertificateManagerCertificateMapEntryHostname(d.Get("hostname"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostname"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostnameProp)) { + obj["hostname"] = hostnameProp + } + matcherProp, err := expandCertificateManagerCertificateMapEntryMatcher(d.Get("matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("matcher"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, matcherProp)) { + obj["matcher"] = matcherProp + } + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CertificateMapEntry %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("certificates") { + updateMask = append(updateMask, "certificates") + } + + if d.HasChange("hostname") { + updateMask = append(updateMask, "hostname") + } + + if d.HasChange("matcher") { + updateMask = append(updateMask, "matcher") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value 
was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating CertificateMapEntry %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CertificateMapEntry %q: %#v", d.Id(), res) + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Updating CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateMapEntryRead(d, meta) +} + +func resourceCertificateManagerCertificateMapEntryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateMapEntry %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "CertificateMapEntry") + } + + err = certificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting 
CertificateMapEntry %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateMapEntryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)/certificateMapEntries/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateMapEntryDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCertificateManagerCertificateMapEntryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + return NameFromSelfLinkStateFunc(v) +} + +func expandCertificateManagerCertificateMapEntryDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCertificateManagerCertificateMapEntryCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return GetResourceNameFromSelfLink(v.(string)), nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go index dcfed30c3e..193d4a9767 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_certificate_manager_dns_authorization.go @@ -66,7 +66,7 @@ and all following characters must be a dash, underscore, letter or digit.`, "labels": { 
Type: schema.TypeMap, Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, + Description: `Set of label tags associated with the DNS Authorization resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, "dns_resource_record": { @@ -83,9 +83,10 @@ certificate.`, Description: `Data of the DNS Resource Record.`, }, "name": { - Type: schema.TypeString, - Computed: true, - Description: `Fully qualified name of the DNS Resource Record.`, + Type: schema.TypeString, + Computed: true, + Description: `Fully qualified name of the DNS Resource Record. +E.g. '_acme-challenge.example.com'.`, }, "type": { Type: schema.TypeString, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go index 98df42b392..caff55cfaf 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloud_scheduler_job.go @@ -227,9 +227,10 @@ send a request to the targeted url`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "uri": { - Type: schema.TypeString, - Required: true, - Description: `The full URI path that the request will be sent to.`, + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: lastSlashDiffSuppress, + Description: `The full URI path that the request will be sent to.`, }, "body": { Type: schema.TypeString, @@ -306,6 +307,12 @@ the URI specified in target will be used.`, }, ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, }, + "paused": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Sets the job to a paused state. 
Jobs default to being enabled when this property is not set.`, + }, "pubsub_target": { Type: schema.TypeList, Optional: true, @@ -418,6 +425,11 @@ Values greater than 5 and negative values are not allowed.`, The value of this field must be a time zone name from the tz database.`, Default: "Etc/UTC", }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the job.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -461,6 +473,12 @@ func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { obj["timeZone"] = timeZoneProp } + pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("paused"); !isEmptyValue(reflect.ValueOf(pausedProp)) && (ok || !reflect.DeepEqual(v, pausedProp)) { + obj["paused"] = pausedProp + } attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) if err != nil { return err @@ -492,6 +510,11 @@ func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) e obj["httpTarget"] = httpTargetProp } + obj, err = resourceCloudSchedulerJobEncoder(d, meta, obj) + if err != nil { + return err + } + url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs") if err != nil { return err @@ -523,6 +546,28 @@ func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) e } d.SetId(id) + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk && paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + + linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", 
endpoint) + url, err = replaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, emptyReqBody, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) + log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) return resourceCloudSchedulerJobRead(d, meta) @@ -582,6 +627,12 @@ func resourceCloudSchedulerJobRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("time_zone", flattenCloudSchedulerJobTimeZone(res["timeZone"], d, config)); err != nil { return fmt.Errorf("Error reading Job: %s", err) } + if err := d.Set("state", flattenCloudSchedulerJobState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("paused", flattenCloudSchedulerJobPaused(res["paused"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } if err := d.Set("attempt_deadline", flattenCloudSchedulerJobAttemptDeadline(res["attemptDeadline"], d, config)); err != nil { return fmt.Errorf("Error reading Job: %s", err) } @@ -635,6 +686,12 @@ func resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { obj["timeZone"] = timeZoneProp } + pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("paused"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pausedProp)) { + obj["paused"] = pausedProp + } attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) if err != nil { return err @@ -666,6 +723,11 @@ func 
resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) e obj["httpTarget"] = httpTargetProp } + obj, err = resourceCloudSchedulerJobUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") if err != nil { return err @@ -686,6 +748,31 @@ func resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Finished updating Job %q: %#v", d.Id(), res) } + if d.HasChange("paused") { + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk { + if paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + } + + linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) + url, err = replaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, emptyReqBody, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) + } return resourceCloudSchedulerJobRead(d, meta) } @@ -766,6 +853,21 @@ func flattenCloudSchedulerJobTimeZone(v interface{}, d *schema.ResourceData, con return v } +func flattenCloudSchedulerJobState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudSchedulerJobPaused(v interface{}, d *schema.ResourceData, config *Config) interface{} { + state := d.Get("state") + if state == "PAUSED" { + return true + } + if state == "ENABLED" { + return false + } + return false // Job has an error state that's not paused or enabled +} + func 
flattenCloudSchedulerJobAttemptDeadline(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1065,6 +1167,10 @@ func expandCloudSchedulerJobTimeZone(v interface{}, d TerraformResourceData, con return v, nil } +func expandCloudSchedulerJobPaused(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandCloudSchedulerJobAttemptDeadline(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -1447,3 +1553,13 @@ func expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{} func expandCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } + +func resourceCloudSchedulerJobEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "paused") // Field doesn't exist in API + return obj, nil +} + +func resourceCloudSchedulerJobUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "paused") // Field doesn't exist in API + return obj, nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go index a3bb21b66e..83a48f9cf7 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudbuild_worker_pool.go @@ -188,12 +188,12 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + 
id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -210,7 +210,7 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyWorkerPool(context.Background(), obj, createDirective...) + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go index 5e0a1bead0..817e6cbf5a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_delivery_pipeline.go @@ -249,12 +249,12 @@ func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta inte Suspended: dcl.Bool(d.Get("suspended").(bool)), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -271,7 +271,7 @@ func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta inte } else { client.Config.BasePath = bp } - res, err := client.ApplyDeliveryPipeline(context.Background(), obj, createDirective...) 
+ res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go index 73b1fa80b2..7128595f2f 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_clouddeploy_target.go @@ -83,6 +83,7 @@ func resourceClouddeployTarget() *schema.Resource { "execution_configs": { Type: schema.TypeList, + Computed: true, Optional: true, Description: "Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. When no configurations are specified, execution will use the default specified in `DefaultPool`.", Elem: ClouddeployTargetExecutionConfigsSchema(), @@ -177,12 +178,14 @@ func ClouddeployTargetExecutionConfigsSchema() *schema.Resource { "artifact_storage": { Type: schema.TypeString, + Computed: true, Optional: true, Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", }, "service_account": { Type: schema.TypeString, + Computed: true, Optional: true, Description: "Optional. Google service account to use for execution. 
If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used.", }, @@ -236,12 +239,12 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/targets/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -258,7 +261,7 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e } else { client.Config.BasePath = bp } - res, err := client.ApplyTarget(context.Background(), obj, createDirective...) + res, err := client.ApplyTarget(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -508,12 +511,12 @@ func flattenClouddeployTargetAnthosCluster(obj *clouddeploy.TargetAnthosCluster) } func expandClouddeployTargetExecutionConfigsArray(o interface{}) []clouddeploy.TargetExecutionConfigs { if o == nil { - return make([]clouddeploy.TargetExecutionConfigs, 0) + return nil } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]clouddeploy.TargetExecutionConfigs, 0) + return nil } items := make([]clouddeploy.TargetExecutionConfigs, 0, len(objs)) @@ -527,14 +530,14 @@ func expandClouddeployTargetExecutionConfigsArray(o interface{}) []clouddeploy.T func expandClouddeployTargetExecutionConfigs(o interface{}) *clouddeploy.TargetExecutionConfigs { if o == nil { - return clouddeploy.EmptyTargetExecutionConfigs + return nil } obj := o.(map[string]interface{}) return &clouddeploy.TargetExecutionConfigs{ Usages: expandClouddeployTargetExecutionConfigsUsagesArray(obj["usages"]), - ArtifactStorage: 
dcl.String(obj["artifact_storage"].(string)), - ServiceAccount: dcl.String(obj["service_account"].(string)), + ArtifactStorage: dcl.StringOrNil(obj["artifact_storage"].(string)), + ServiceAccount: dcl.StringOrNil(obj["service_account"].(string)), WorkerPool: dcl.String(obj["worker_pool"].(string)), } } @@ -605,7 +608,6 @@ func flattenClouddeployTargetExecutionConfigsUsagesArray(obj []clouddeploy.Targe } return items } - func expandClouddeployTargetExecutionConfigsUsagesArray(o interface{}) []clouddeploy.TargetExecutionConfigsUsagesEnum { objs := o.([]interface{}) items := make([]clouddeploy.TargetExecutionConfigsUsagesEnum, 0, len(objs)) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go index 2aed46cd9c..e632e631b1 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions2_function.go @@ -200,6 +200,13 @@ response to a condition in another service.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "event_filters": { + Type: schema.TypeSet, + Optional: true, + Description: `Criteria used to filter events.`, + Elem: cloudfunctions2functionEventTriggerEventFiltersSchema(), + // Default schema.HashSchema is used. + }, "event_type": { Type: schema.TypeString, Optional: true, @@ -207,6 +214,7 @@ response to a condition in another service.`, }, "pubsub_topic": { Type: schema.TypeString, + Computed: true, Optional: true, Description: `The name of a Pub/Sub topic in the same project that will be used as the transport topic for the event delivery.`, @@ -366,6 +374,35 @@ timeout period. 
Defaults to 60 seconds.`, } } +func cloudfunctions2functionEventTriggerEventFiltersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute": { + Type: schema.TypeString, + Required: true, + Description: `'Required. The name of a CloudEvents attribute. +Currently, only a subset of attributes are supported for filtering. Use the 'gcloud eventarc providers describe' command to learn more about events and their attributes. +Do not filter for the 'type' attribute here, as this is already achieved by the resource's 'event_type' attribute.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: `Required. The value for the attribute. +If the operator field is set as 'match-path-pattern', this value can be a path pattern instead of an exact value.`, + }, + "operator": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. The operator used for matching the events with the value of +the filter. If not specified, only events that have an exact key-value +pair specified in the filter are matched. +The only allowed value is 'match-path-pattern'. 
+[See documentation on path patterns here](https://cloud.google.com/eventarc/docs/path-patterns)'`, + }, + }, + } +} + func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) @@ -1027,6 +1064,8 @@ func flattenCloudfunctions2functionEventTrigger(v interface{}, d *schema.Resourc flattenCloudfunctions2functionEventTriggerTriggerRegion(original["triggerRegion"], d, config) transformed["event_type"] = flattenCloudfunctions2functionEventTriggerEventType(original["eventType"], d, config) + transformed["event_filters"] = + flattenCloudfunctions2functionEventTriggerEventFilters(original["eventFilters"], d, config) transformed["pubsub_topic"] = flattenCloudfunctions2functionEventTriggerPubsubTopic(original["pubsubTopic"], d, config) transformed["service_account_email"] = @@ -1047,6 +1086,38 @@ func flattenCloudfunctions2functionEventTriggerEventType(v interface{}, d *schem return v } +func flattenCloudfunctions2functionEventTriggerEventFilters(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(cloudfunctions2functionEventTriggerEventFiltersSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "attribute": flattenCloudfunctions2functionEventTriggerEventFiltersAttribute(original["attribute"], d, config), + "value": flattenCloudfunctions2functionEventTriggerEventFiltersValue(original["value"], d, config), + "operator": flattenCloudfunctions2functionEventTriggerEventFiltersOperator(original["operator"], d, config), + }) + } + return transformed +} +func flattenCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1519,6 +1590,13 @@ func expandCloudfunctions2functionEventTrigger(v interface{}, d TerraformResourc transformed["eventType"] = transformedEventType } + transformedEventFilters, err := expandCloudfunctions2functionEventTriggerEventFilters(original["event_filters"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEventFilters); val.IsValid() && !isEmptyValue(val) { + transformed["eventFilters"] = transformedEventFilters + } + transformedPubsubTopic, err := expandCloudfunctions2functionEventTriggerPubsubTopic(original["pubsub_topic"], d, config) if err != nil { return nil, err @@ -1555,6 +1633,55 @@ func expandCloudfunctions2functionEventTriggerEventType(v interface{}, d Terrafo return v, nil } +func expandCloudfunctions2functionEventTriggerEventFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAttribute, err := expandCloudfunctions2functionEventTriggerEventFiltersAttribute(original["attribute"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAttribute); val.IsValid() && !isEmptyValue(val) { + transformed["attribute"] = transformedAttribute + } + + transformedValue, err := 
expandCloudfunctions2functionEventTriggerEventFiltersValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedOperator, err := expandCloudfunctions2functionEventTriggerEventFiltersOperator(original["operator"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !isEmptyValue(val) { + transformed["operator"] = transformedOperator + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go index ea4e6520cb..53a7b7ac39 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_cloudfunctions_function.go @@ -72,7 +72,7 @@ func parseCloudFunctionId(d *schema.ResourceData, config *Config) (*cloudFunctio }, nil } -// Differs from validateGcpName because Cloud Functions allow capital letters +// Differs from 
validateGCEName because Cloud Functions allow capital letters // at start/end func validateResourceCloudFunctionsFunctionName(v interface{}, k string) (ws []string, errors []error) { re := `^(?:[a-zA-Z](?:[-_a-zA-Z0-9]{0,61}[a-zA-Z0-9])?)$` diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go index 56e65d376c..1710fc751e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_composer_environment.go @@ -133,7 +133,7 @@ func resourceComposerEnvironment() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the environment.`, }, "region": { @@ -434,12 +434,13 @@ func resourceComposerEnvironment() *schema.Resource { Description: `When enabled, IPs from public (non-RFC1918) ranges can be used for ip_allocation_policy.cluster_ipv4_cidr_block and ip_allocation_policy.service_ipv4_cidr_block.`, }, "cloud_composer_connection_subnetwork": { - Type: schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: composerPrivateEnvironmentConfig, - ForceNew: true, - Description: `When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. 
This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + Description: `When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, }, }, }, @@ -503,7 +504,7 @@ func resourceComposerEnvironment() *schema.Resource { Computed: true, AtLeastOneOf: composerConfigKeys, MaxItems: 1, - Description: `The encryption options for the Composer environment and its dependencies. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + Description: `The encryption options for the Composer environment and its dependencies.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "kms_key_name": { @@ -871,6 +872,21 @@ func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("config.0.software_config.0.scheduler_count") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.SchedulerCount = config.SoftwareConfig.SchedulerCount + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.schedulerCount", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.software_config.0.airflow_config_overrides") { patchObj := &composer.Environment{ Config: &composer.EnvironmentConfig{ diff --git 
a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go index 83b234b4f0..faae599ac5 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_autoscaler.go @@ -380,7 +380,7 @@ to include directives regarding slower scale down, as described above.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. The name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go index ccb5739785..01e23712d1 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_bucket.go @@ -68,6 +68,21 @@ last character, which cannot be a dash.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "bypass_cache_on_request_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. 
The cache is bypassed for all cdnPolicy.cacheMode settings.`, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Optional: true, + Description: `The header field name to match on when bypassing cache. Values are case-insensitive.`, + }, + }, + }, + }, "cache_key_policy": { Type: schema.TypeList, Optional: true, @@ -154,6 +169,11 @@ can be specified as values, and you cannot specify a status code more than once. }, }, }, + "request_coalescing": { + Type: schema.TypeBool, + Optional: true, + Description: `If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.`, + }, "serve_while_stale": { Type: schema.TypeInt, Computed: true, @@ -607,6 +627,10 @@ func flattenComputeBackendBucketCdnPolicy(v interface{}, d *schema.ResourceData, flattenComputeBackendBucketCdnPolicyCacheMode(original["cacheMode"], d, config) transformed["serve_while_stale"] = flattenComputeBackendBucketCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) + transformed["request_coalescing"] = + flattenComputeBackendBucketCdnPolicyRequestCoalescing(original["requestCoalescing"], d, config) + transformed["bypass_cache_on_request_headers"] = + flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypassCacheOnRequestHeaders"], d, config) return []interface{}{transformed} } func flattenComputeBackendBucketCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -778,6 +802,32 @@ func flattenComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d *schem return v // let terraform core handle it otherwise } +func flattenComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == 
nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "header_name": flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["headerName"], d, config), + }) + } + return transformed +} +func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenComputeBackendBucketEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -878,6 +928,20 @@ func expandComputeBackendBucketCdnPolicy(v interface{}, d TerraformResourceData, transformed["serveWhileStale"] = transformedServeWhileStale } + transformedRequestCoalescing, err := expandComputeBackendBucketCdnPolicyRequestCoalescing(original["request_coalescing"], d, config) + if err != nil { + return nil, err + } else { + transformed["requestCoalescing"] = transformedRequestCoalescing + } + + transformedBypassCacheOnRequestHeaders, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypass_cache_on_request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBypassCacheOnRequestHeaders); val.IsValid() && !isEmptyValue(val) { + transformed["bypassCacheOnRequestHeaders"] = transformedBypassCacheOnRequestHeaders + } + return transformed, nil } @@ -980,6 +1044,36 @@ func expandComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d Terrafo return v, nil } +func expandComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHeaderName, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["header_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + transformed["headerName"] = transformedHeaderName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandComputeBackendBucketEdgeSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go index 7148838b7a..28dbfd538a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_backend_service.go @@ -995,7 +995,10 @@ partial URL.`, For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) -and CONNECTION (for TCP/SSL). Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, +and CONNECTION (for TCP/SSL). + +See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) +for an explanation of load balancing modes. 
Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, Default: "UTILIZATION", }, "capacity_scaler": { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go index ea73b72706..a93db04637 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_disk.go @@ -140,9 +140,15 @@ func diskImageEquals(oldImageName, newImageName string) bool { func diskImageFamilyEquals(imageName, familyName string) bool { // Handles the case when the image name includes the family name - // e.g. image name: debian-9-drawfork-v20180109, family name: debian-9 - if strings.Contains(imageName, familyName) { - return true + // e.g. image name: debian-11-bullseye-v20220719, family name: debian-11 + // We have to check for arm64 because of cases like: + // image name: opensuse-leap-15-4-v20220713-arm64, family name: opensuse-leap (should not suppress) + if strings.Contains(imageName, strings.TrimSuffix(familyName, "-arm64")) { + if strings.Contains(imageName, "-arm64") { + return strings.HasSuffix(familyName, "-arm64") + } else { + return !strings.HasSuffix(familyName, "-arm64") + } } if suppressCanonicalFamilyDiff(imageName, familyName) { @@ -167,8 +173,13 @@ func diskImageFamilyEquals(imageName, familyName string) bool { // e.g. 
image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts func suppressCanonicalFamilyDiff(imageName, familyName string) bool { parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) - if len(parts) == 3 { - f := fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) + if len(parts) == 4 { + var f string + if parts[3] == "" { + f = fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) + } else { + f = fmt.Sprintf("ubuntu-%s%s-lts-%s", parts[1], parts[2], parts[3]) + } if f == familyName { return true } @@ -378,6 +389,7 @@ the supported values for the caller's project.`, }, "provisioned_iops": { Type: schema.TypeInt, + Computed: true, Optional: true, ForceNew: true, Description: `Indicates how many IOPS must be provisioned for the disk.`, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go index f382914f66..2eca41d907 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall.go @@ -150,7 +150,7 @@ func resourceComputeFirewall() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match @@ -311,7 +311,7 @@ one of 'source_ranges', 'source_tags' or 'source_service_accounts' is required.` Type: schema.TypeString, }, Set: schema.HashString, - ConflictsWith: []string{"destination_ranges", "source_service_accounts", "target_service_accounts"}, + ConflictsWith: []string{"source_service_accounts", "destination_ranges", "target_service_accounts"}, }, "target_service_accounts": { Type: schema.TypeSet, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go index f38b40740a..9101c16817 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy.go @@ -120,12 +120,12 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ Description: dcl.String(d.Get("description").(string)), } - id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -142,7 +142,7 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ } else { client.Config.BasePath = bp } - res, err := client.ApplyFirewallPolicy(context.Background(), obj, createDirective...) + res, err := client.ApplyFirewallPolicy(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -155,10 +155,11 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + // ID has a server-generated value, set again after creation. + + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go index 8779c92a88..5a844026c5 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_association.go @@ -84,12 +84,12 @@ func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta Name: dcl.String(d.Get("name").(string)), } - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -106,7 +106,7 @@ func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta } else { client.Config.BasePath = bp } - res, err := client.ApplyFirewallPolicyAssociation(context.Background(), obj, 
createDirective...) + res, err := client.ApplyFirewallPolicyAssociation(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go index 034ead910a..ba4261510e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_firewall_policy_rule.go @@ -190,12 +190,12 @@ func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interf TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), } - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -212,7 +212,7 @@ func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interf } else { client.Config.BasePath = bp } - res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, createDirective...) + res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go index 13e31e051a..730580af76 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_forwarding_rule.go @@ -187,7 +187,7 @@ func resourceComputeForwardingRule() *schema.Resource { Optional: true, ForceNew: true, Description: "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, }, "subnetwork": { @@ -305,7 +305,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -322,7 +322,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ } else { client.Config.BasePath = bp } - res, err := client.ApplyForwardingRule(context.Background(), obj, createDirective...) 
+ res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go index ad98284c62..33a65cbf6d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_forwarding_rule.go @@ -232,12 +232,12 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -254,7 +254,7 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte } else { client.Config.BasePath = bp } - res, err := client.ApplyForwardingRule(context.Background(), obj, createDirective...) + res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go index 65ba650432..847400db51 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_global_network_endpoint_group.go @@ -43,7 +43,7 @@ func resourceComputeGlobalNetworkEndpointGroup() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go index 2b38e76a4a..bd24bc33fc 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ha_vpn_gateway.go @@ -43,7 +43,7 @@ func resourceComputeHaVpnGateway() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go index c02fefa540..a571191114 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance.go @@ -46,6 +46,7 @@ var ( "scheduling.0.node_affinities", "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", + "scheduling.0.instance_termination_action", } shieldedInstanceConfigKeys = []string{ @@ -613,11 +614,13 @@ func resourceComputeInstance() *schema.Resource { DiffSuppressFunc: emptyOrDefaultStringSuppress(""), Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, }, + "min_node_cpus": { Type: schema.TypeInt, Optional: true, AtLeastOneOf: schedulingKeys, }, + "provisioning_model": { Type: schema.TypeString, Optional: true, @@ -626,6 +629,13 @@ func resourceComputeInstance() *schema.Resource { AtLeastOneOf: schedulingKeys, Description: `Whether the instance is spot. If this is set as SPOT.`, }, + + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: schedulingKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, }, }, }, @@ -2208,13 +2218,15 @@ func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([ // issues when a count of `0` guest accelerators is desired. This may occur when // guest_accelerator support is controlled via a module variable. E.g.: // -// guest_accelerators { -// count = "${var.enable_gpu ? var.gpu_count : 0}" -// ... -// } +// guest_accelerators { +// count = "${var.enable_gpu ? 
var.gpu_count : 0}" +// ... +// } + // After reconciling the desired and actual state, we would otherwise see a -// perpetual resembling: -// [] != [{"count":0, "type": "nvidia-tesla-k80"}] +// perpetual diff resembling: +// +// [] != [{"count":0, "type": "nvidia-tesla-k80"}] func suppressEmptyGuestAcceleratorDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { oldi, newi := d.GetChange("guest_accelerator") diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go index 5ee889c995..5f86a17e60 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_group_manager.go @@ -27,7 +27,6 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Update: schema.DefaultTimeout(15 * time.Minute), Delete: schema.DefaultTimeout(15 * time.Minute), }, - Schema: map[string]*schema.Schema{ "base_instance_name": { Type: schema.TypeString, @@ -267,7 +266,30 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, }, - + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, "wait_for_instances": { Type: schema.TypeBool, Optional: true, @@ -279,7 +301,8 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Optional: true, Default: "STABLE", ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, + + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, }, "stateful_disk": { Type: schema.TypeSet, @@ -333,6 +356,20 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + }, + }, + }, "stateful": { Type: schema.TypeList, Computed: true, @@ -423,6 +460,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), StatefulPolicy: expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()), // Force send TargetSize to allow a value of 0. 
ForceSendFields: []string{"TargetSize"}, @@ -630,6 +668,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err = d.Set("update_policy", flattenUpdatePolicy(manager.UpdatePolicy)); err != nil { return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", err.Error()) + } + } if err = d.Set("status", flattenStatus(manager.Status)); err != nil { return fmt.Errorf("Error setting status in state: %s", err.Error()) } @@ -695,6 +738,16 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte change = true } + if d.HasChange("all_instances_config") { + oldAic, newAic := d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + if d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()) change = true @@ -838,7 +891,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte func computeIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" conf := resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getManager, waitForUpdates, d, meta), Timeout: 
d.Timeout(schema.TimeoutCreate), @@ -1020,6 +1073,58 @@ func flattenUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdatePolicy) return results } +func expandAllInstancesConfig(old []interface{}, new []interface{}) *compute.InstanceGroupManagerAllInstancesConfig { + var properties *compute.InstancePropertiesPatch + for _, raw := range new { + properties = &compute.InstancePropertiesPatch{} + data := raw.(map[string]interface{}) + properties.Metadata = convertStringMap(data["metadata"].(map[string]interface{})) + if len(properties.Metadata) == 0 { + properties.NullFields = append(properties.NullFields, "Metadata") + } + properties.Labels = convertStringMap(data["labels"].(map[string]interface{})) + if len(properties.Labels) == 0 { + properties.NullFields = append(properties.NullFields, "Labels") + } + } + + if properties != nil { + for _, raw := range old { + data := raw.(map[string]interface{}) + for k := range data["metadata"].(map[string]interface{}) { + if _, exist := properties.Metadata[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) + } + } + for k := range data["labels"].(map[string]interface{}) { + if _, exist := properties.Labels[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) + } + } + } + } + if properties != nil { + allInstancesConfig := &compute.InstanceGroupManagerAllInstancesConfig{} + allInstancesConfig.Properties = properties + return allInstancesConfig + } else { + return nil + } +} + +func flattenAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + props := map[string]interface{}{} + if len(allInstancesConfig.Properties.Metadata) > 0 { + props["metadata"] = allInstancesConfig.Properties.Metadata + } + if len(allInstancesConfig.Properties.Labels) > 0 { + props["labels"] = allInstancesConfig.Properties.Labels + } + results = append(results, 
props) + return results +} + func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]interface{} { results := []map[string]interface{}{} data := map[string]interface{}{ @@ -1027,6 +1132,9 @@ func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]inte "stateful": flattenStatusStateful(status.Stateful), "version_target": flattenStatusVersionTarget(status.VersionTarget), } + if status.AllInstancesConfig != nil { + data["all_instances_config"] = flattenStatusAllInstancesConfig(status.AllInstancesConfig) + } results = append(results, data) return results } @@ -1059,6 +1167,15 @@ func flattenStatusVersionTarget(versionTarget *compute.InstanceGroupManagerStatu return results } +func flattenStatusAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerStatusAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "effective": allInstancesConfig.Effective, + } + results = append(results, data) + return results +} + func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { if err := d.Set("wait_for_instances", false); err != nil { return nil, fmt.Errorf("Error setting wait_for_instances: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go index 62552b3fbc..0d15ea0f00 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_instance_template.go @@ -24,6 +24,7 @@ var ( "scheduling.0.node_affinities", "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", + "scheduling.0.instance_termination_action", } shieldedInstanceTemplateConfigKeys = 
[]string{ @@ -66,7 +67,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the instance template. If you leave this blank, Terraform will auto-generate a unique name.`, }, @@ -535,6 +536,13 @@ func resourceComputeInstanceTemplate() *schema.Resource { AtLeastOneOf: schedulingInstTemplateKeys, Description: `Whether the instance is spot. If this is set as SPOT.`, }, + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, }, }, }, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go index 4085a8b76f..b1fb18035d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network.go @@ -43,9 +43,10 @@ func resourceComputeNetwork() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go index e8ebd5bfd7..da00fa666e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint.go @@ -54,12 +54,6 @@ range).`, DiffSuppressFunc: compareResourceNames, Description: `The network endpoint group this endpoint is part of.`, }, - "port": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Port number of network endpoint.`, - }, "instance": { Type: schema.TypeString, Optional: true, @@ -69,6 +63,12 @@ range).`, This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.`, }, + "port": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Port number of network endpoint.`, + }, "zone": { Type: schema.TypeString, Computed: true, @@ -285,7 +285,9 @@ func resourceComputeNetworkEndpointDelete(d *schema.ResourceData, meta interface if err != nil { return err } - toDelete["port"] = portProp + if portProp != 0 { + toDelete["port"] = portProp + } ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go index 12a8531ec6..f18d54c037 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go +++ 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_endpoint_group.go @@ -43,7 +43,7 @@ func resourceComputeNetworkEndpointGroup() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match @@ -78,14 +78,16 @@ you create the resource.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", ""}), + ValidateFunc: validateEnum([]string{"GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", ""}), Description: `Type of network endpoints in this network endpoint group. NON_GCP_PRIVATE_IP_PORT is used for hybrid connectivity network endpoint groups (see https://cloud.google.com/load-balancing/docs/hybrid). Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or -CONNECTION balancing modes. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]`, +CONNECTION balancing modes. + +Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. 
Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]`, Default: "GCE_VM_IP_PORT", }, "subnetwork": { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go index 1efade4321..a5707c11cc 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_network_peering.go @@ -36,7 +36,7 @@ func resourceComputeNetworkPeering() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the peering.`, }, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go index 57d8832cfb..d9954e162d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_packet_mirroring.go @@ -114,7 +114,7 @@ set to true.`, "name": { Type: schema.TypeString, Required: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the packet mirroring rule`, }, "network": { @@ -169,10 +169,9 @@ destination (egress) IP in the IP header. Only IPv4 is supported.`, "ip_protocols": { Type: schema.TypeList, Optional: true, - Description: `Protocols that apply as a filter on mirrored traffic. 
Possible values: ["tcp", "udp", "icmp"]`, + Description: `Possible IP protocols including tcp, udp, icmp and esp`, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"tcp", "udp", "icmp"}), + Type: schema.TypeString, }, }, }, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go index f495759851..2d25c68982 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_autoscaler.go @@ -379,7 +379,7 @@ to include directives regarding slower scale down, as described above.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. The name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go index 72cc546d45..9c821575be 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_backend_service.go @@ -41,7 +41,7 @@ var backendServiceOnlyManagedFieldNames = []string{ // validateManagedBackendServiceBackends ensures capacity_scaler is set for each backend in a managed // backend service. 
To prevent a permadiff, we decided to override the API behavior and require the -//// capacity_scaler value in this case. +// capacity_scaler value in this case. // // The API: // - requires the sum of the backends' capacity_scalers be > 0 @@ -1017,8 +1017,11 @@ partial URL.`, Type: schema.TypeString, Optional: true, ValidateFunc: validateEnum([]string{"UTILIZATION", "RATE", "CONNECTION", ""}), - Description: `Specifies the balancing mode for this backend. Default value: "CONNECTION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, - Default: "CONNECTION", + Description: `Specifies the balancing mode for this backend. + +See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) +for an explanation of load balancing modes. Default value: "CONNECTION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, + Default: "CONNECTION", }, "capacity_scaler": { Type: schema.TypeFloat, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go index c61ab6229e..365eec2797 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_instance_group_manager.go @@ -180,7 +180,8 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { Optional: true, Default: "STABLE", ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, + + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, }, "auto_healing_policies": { @@ -312,7 +313,30 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, }, - + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, "stateful_disk": { Type: schema.TypeSet, Optional: true, @@ -361,6 +385,20 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + }, + }, + }, "stateful": { Type: schema.TypeList, Computed: true, @@ -424,6 +462,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), DistributionPolicy: expandDistributionPolicy(d), StatefulPolicy: expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()), // Force send TargetSize to allow size of 0. 
@@ -461,7 +500,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met func computeRIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" conf := resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getRegionalManager, waitForUpdates, d, meta), Timeout: d.Timeout(schema.TimeoutCreate), @@ -520,6 +559,12 @@ func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, if !m.Status.VersionTarget.IsReached { return false, "reaching version target", nil } + if !m.Status.VersionTarget.IsReached { + return false, "reaching version target", nil + } + if !m.Status.AllInstancesConfig.Effective { + return false, "updating all instances config", nil + } } return true, "created", nil } else { @@ -595,6 +640,11 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil { return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", err.Error()) + } + } if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { return fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) } @@ -657,6 +707,16 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met change = true } + if d.HasChange("all_instances_config") { + oldAic, newAic := 
d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + if d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d.Get("stateful_disk").(*schema.Set).List()) change = true diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go index b3e7a70829..3ac25f3bf6 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_network_endpoint_group.go @@ -43,7 +43,7 @@ func resourceComputeRegionNetworkEndpointGroup() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match @@ -179,7 +179,7 @@ and { service="bar2", tag="foo2" } respectively.`, }, }, }, - ConflictsWith: []string{"app_engine", "cloud_function", "serverless_deployment"}, + ConflictsWith: []string{"cloud_function", "app_engine", "serverless_deployment"}, }, "description": { Type: schema.TypeString, @@ -187,6 +187,15 @@ and { service="bar2", tag="foo2" } respectively.`, ForceNew: true, Description: `An optional description of this resource. 
Provide this property when you create the resource.`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `This field is only used for PSC. +The URL of the network to which all network endpoints in the NEG belong. Uses +"default" project network if unspecified.`, }, "network_endpoint_type": { Type: schema.TypeString, @@ -245,7 +254,15 @@ API Gateway: Unused, App Engine: The service version, Cloud Functions: Unused, C }, }, }, - ConflictsWith: []string{"cloud_run", "app_engine", "cloud_function"}, + ConflictsWith: []string{"cloud_run", "cloud_function", "app_engine"}, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `This field is only used for PSC. +Optional URL of the subnetwork to which all network endpoints in the NEG belong.`, }, "project": { Type: schema.TypeString, @@ -294,6 +311,18 @@ func resourceComputeRegionNetworkEndpointGroupCreate(d *schema.ResourceData, met } else if v, ok := d.GetOkExists("psc_target_service"); !isEmptyValue(reflect.ValueOf(pscTargetServiceProp)) && (ok || !reflect.DeepEqual(v, pscTargetServiceProp)) { obj["pscTargetService"] = pscTargetServiceProp } + networkProp, err := expandComputeRegionNetworkEndpointGroupNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + subnetworkProp, err := expandComputeRegionNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } cloudRunProp, err := 
expandComputeRegionNetworkEndpointGroupCloudRun(d.Get("cloud_run"), d, config) if err != nil { return err @@ -417,6 +446,12 @@ func resourceComputeRegionNetworkEndpointGroupRead(d *schema.ResourceData, meta if err := d.Set("psc_target_service", flattenComputeRegionNetworkEndpointGroupPscTargetService(res["pscTargetService"], d, config)); err != nil { return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) } + if err := d.Set("network", flattenComputeRegionNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("subnetwork", flattenComputeRegionNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } if err := d.Set("cloud_run", flattenComputeRegionNetworkEndpointGroupCloudRun(res["cloudRun"], d, config)); err != nil { return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) } @@ -521,6 +556,20 @@ func flattenComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d * return v } +func flattenComputeRegionNetworkEndpointGroupNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + return ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + return ConvertSelfLinkToV1(v.(string)) +} + func flattenComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -654,6 +703,22 @@ func expandComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d Te return v, nil } +func expandComputeRegionNetworkEndpointGroupNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + f, err := parseGlobalFieldValue("networks", v.(string), 
"project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) + } + return f.RelativeLink(), nil +} + func expandComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go index 8dee902a4e..67f9b077cd 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_region_ssl_certificate.go @@ -68,7 +68,7 @@ The chain must include at least one intermediate cert.`, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go index ddc3c983ed..3502adc366 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_resource_policy.go @@ -94,7 +94,7 @@ exact number of VMs.`, }, }, }, - ConflictsWith: []string{"snapshot_schedule_policy", "instance_schedule_policy"}, + ConflictsWith: []string{"instance_schedule_policy", "snapshot_schedule_policy"}, }, "instance_schedule_policy": { Type: schema.TypeList, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go index 6568a43ef4..41f239f9ed 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_router.go @@ -66,7 +66,7 @@ func resourceComputeRouter() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go index 6155485e90..f9937fcca6 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_security_policy.go @@ -35,7 +35,7 @@ func resourceComputeSecurityPolicy() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the security policy.`, }, @@ -54,10 +54,11 @@ func resourceComputeSecurityPolicy() *schema.Resource { }, "type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache.`, + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). 
They filter requests before the request is served from Google's cache.`, + ValidateFunc: validation.StringInSlice([]string{"CLOUD_ARMOR", "CLOUD_ARMOR_EDGE", "CLOUD_ARMOR_INTERNAL_SERVICE"}, false), }, "rule": { @@ -202,10 +203,11 @@ func resourceComputeSecurityPolicy() *schema.Resource { }, "enforce_on_key": { - Type: schema.TypeString, - Optional: true, - Default: "ALL", - Description: `Determines the key to enforce the rateLimitThreshold on`, + Type: schema.TypeString, + Optional: true, + Default: "ALL", + Description: `Determines the key to enforce the rateLimitThreshold on`, + ValidateFunc: validation.StringInSlice([]string{"ALL", "IP", "HTTP_HEADER", "XFF_IP", "HTTP_COOKIE"}, false), }, "enforce_on_key_name": { @@ -531,6 +533,11 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdvancedOptionsConfig", "advancedOptionsConfig.jsonParsing", "advancedOptionsConfig.logLevel") } + if d.HasChange("adaptive_protection_config") { + securityPolicy.AdaptiveProtectionConfig = expandSecurityPolicyAdaptiveProtectionConfig(d.Get("adaptive_protection_config").([]interface{})) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdaptiveProtectionConfig", "adaptiveProtectionConfig.layer7DdosDefenseConfig.enable", "adaptiveProtectionConfig.layer7DdosDefenseConfig.ruleVisibility") + } + if len(securityPolicy.ForceSendFields) > 0 { client := config.NewComputeClient(userAgent) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go index 20e8967387..da5238a047 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go +++ 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_ssl_certificate.go @@ -68,7 +68,7 @@ The chain must include at least one intermediate cert.`, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go index 59dd5e28cf..977e3f6615 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_subnetwork.go @@ -83,7 +83,7 @@ non-overlapping within a network. Only IPv4 is supported.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters @@ -250,7 +250,7 @@ secondary IP ranges within a network. Only IPv4 is supported.`, "range_name": { Type: schema.TypeString, Required: true, - ValidateFunc: validateGCPName, + ValidateFunc: validateGCEName, Description: `The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. 
The name diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go index 275601d15f..987aca14d1 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_compute_target_https_proxy.go @@ -53,17 +53,6 @@ first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, }, - "ssl_certificates": { - Type: schema.TypeList, - Required: true, - Description: `A list of SslCertificate resources that are used to authenticate -connections between users and the load balancer. At least one SSL -certificate must be specified.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, "url_map": { Type: schema.TypeString, Required: true, @@ -71,6 +60,14 @@ certificate must be specified.`, Description: `A reference to the UrlMap resource that defines the mapping from URL to the BackendService.`, }, + "certificate_map": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to the CertificateMap resource uri that identifies a certificate map +associated with the given target proxy. This field can only be set for global target proxies. +Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}'.`, + ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, + }, "description": { Type: schema.TypeString, Optional: true, @@ -96,6 +93,18 @@ specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. 
Default value: "NONE" Possible values: ["NONE", "ENABLE", "DISABLE"]`, Default: "NONE", }, + "ssl_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `A list of SslCertificate resources that are used to authenticate +connections between users and the load balancer. At least one SSL +certificate must be specified.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, + }, "ssl_policy": { Type: schema.TypeString, Optional: true, @@ -161,6 +170,12 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { obj["sslCertificates"] = sslCertificatesProp } + certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(certificateMapProp)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) if err != nil { return err @@ -278,6 +293,9 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{ if err := d.Set("ssl_certificates", flattenComputeTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) } + if err := d.Set("certificate_map", flattenComputeTargetHttpsProxyCertificateMap(res["certificateMap"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } if err := d.Set("ssl_policy", flattenComputeTargetHttpsProxySslPolicy(res["sslPolicy"], d, config)); err != nil { return fmt.Errorf("Error reading 
TargetHttpsProxy: %s", err) } @@ -379,6 +397,40 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return err } } + if d.HasChange("certificate_map") { + obj := make(map[string]interface{}) + + certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } + + url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setCertificateMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = computeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } if d.HasChange("ssl_policy") { obj := make(map[string]interface{}) @@ -562,6 +614,10 @@ func flattenComputeTargetHttpsProxySslCertificates(v interface{}, d *schema.Reso return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) } +func flattenComputeTargetHttpsProxyCertificateMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenComputeTargetHttpsProxySslPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return v @@ -608,6 +664,10 @@ func expandComputeTargetHttpsProxySslCertificates(v interface{}, d TerraformReso return req, 
nil } +func expandComputeTargetHttpsProxyCertificateMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandComputeTargetHttpsProxySslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { f, err := parseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go index de8cda6c61..17589ac773 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_cluster.go @@ -117,7 +117,6 @@ func resourceContainerAwsCluster() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Logging configuration.", MaxItems: 1, Elem: ContainerAwsClusterLoggingConfigSchema(), @@ -240,7 +239,6 @@ func ContainerAwsClusterControlPlaneSchema() *schema.Resource { "iam_instance_profile": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The name of the AWS IAM instance pofile to assign to each control plane replica.", }, @@ -543,7 +541,6 @@ func ContainerAwsClusterLoggingConfigSchema() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Configuration of the logging components.", MaxItems: 1, Elem: ContainerAwsClusterLoggingConfigComponentConfigSchema(), @@ -559,7 +556,6 @@ func ContainerAwsClusterLoggingConfigComponentConfigSchema() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Components of the logging configuration to be enabled.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -612,12 +608,12 @@ func 
resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -634,7 +630,7 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) } else { client.Config.BasePath = bp } - res, err := client.ApplyCluster(context.Background(), obj, createDirective...) + res, err := client.ApplyCluster(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -1361,7 +1357,6 @@ func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray } return items } - func expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum { objs := o.([]interface{}) items := make([]containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go index 93fc48f010..f512b39543 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_aws_node_pool.go @@ -192,7 +192,6 @@ func ContainerAwsNodePoolConfigSchema() *schema.Resource { "iam_instance_profile": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The name of the AWS 
IAM role assigned to nodes in the pool.", }, @@ -218,6 +217,7 @@ func ContainerAwsNodePoolConfigSchema() *schema.Resource { Type: schema.TypeString, Computed: true, Optional: true, + ForceNew: true, Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", }, @@ -225,7 +225,7 @@ func ContainerAwsNodePoolConfigSchema() *schema.Resource { Type: schema.TypeMap, Optional: true, ForceNew: true, - Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -429,12 +429,12 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -451,7 +451,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyNodePool(context.Background(), obj, createDirective...) + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go index 772e2912a2..0b710d61c9 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_client.go @@ -116,12 +116,12 @@ func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -138,7 +138,7 @@ func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyClient(context.Background(), obj, createDirective...) + res, err := client.ApplyClient(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go index 0894791d47..64f3bef45a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_cluster.go @@ -132,7 +132,6 @@ func resourceContainerAzureCluster() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Logging configuration.", MaxItems: 1, Elem: ContainerAzureClusterLoggingConfigSchema(), @@ -230,7 +229,6 @@ func ContainerAzureClusterControlPlaneSchema() *schema.Resource { "ssh_config": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "SSH configuration for how to access the underlying control plane machines.", MaxItems: 1, Elem: ContainerAzureClusterControlPlaneSshConfigSchema(), @@ -319,7 +317,6 @@ func ContainerAzureClusterControlPlaneSshConfigSchema() *schema.Resource { "authorized_key": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The SSH public key data for VMs managed by Anthos. 
This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", }, }, @@ -464,7 +461,6 @@ func ContainerAzureClusterLoggingConfigSchema() *schema.Resource { Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Configuration of the logging components.", MaxItems: 1, Elem: ContainerAzureClusterLoggingConfigComponentConfigSchema(), @@ -480,7 +476,6 @@ func ContainerAzureClusterLoggingConfigComponentConfigSchema() *schema.Resource Type: schema.TypeList, Computed: true, Optional: true, - ForceNew: true, Description: "Components of the logging configuration to be enabled.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -535,12 +530,12 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -557,7 +552,7 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ } else { client.Config.BasePath = bp } - res, err := client.ApplyCluster(context.Background(), obj, createDirective...) + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -1253,7 +1248,6 @@ func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArr } return items } - func expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum { objs := o.([]interface{}) items := make([]containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go index d2677c2ba0..fb6c696bf4 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_azure_node_pool.go @@ -48,7 +48,6 @@ func resourceContainerAzureNodePool() *schema.Resource { "autoscaling": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "Autoscaler configuration for this node pool.", MaxItems: 1, Elem: ContainerAzureNodePoolAutoscalingSchema(), @@ -65,7 +64,6 @@ func resourceContainerAzureNodePool() *schema.Resource { "config": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "The node configuration of the node pool.", MaxItems: 1, Elem: ContainerAzureNodePoolConfigSchema(), @@ -176,14 +174,12 @@ func ContainerAzureNodePoolAutoscalingSchema() *schema.Resource { "max_node_count": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: "Maximum number of nodes in the node pool. Must be >= min_node_count.", }, "min_node_count": { Type: schema.TypeInt, Required: true, - ForceNew: true, Description: "Minimum number of nodes in the node pool. 
Must be >= 1 and <= max_node_count.", }, }, @@ -196,7 +192,6 @@ func ContainerAzureNodePoolConfigSchema() *schema.Resource { "ssh_config": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "SSH configuration for how to access the node pool machines.", MaxItems: 1, Elem: ContainerAzureNodePoolConfigSshConfigSchema(), @@ -254,7 +249,6 @@ func ContainerAzureNodePoolConfigSshConfigSchema() *schema.Resource { "authorized_key": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", }, }, @@ -329,12 +323,12 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -351,7 +345,7 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface } else { client.Config.BasePath = bp } - res, err := client.ApplyNodePool(context.Background(), obj, createDirective...) + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go index ab85197c05..50a1e83763 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_container_cluster.go @@ -60,9 +60,9 @@ var ( "addons_config.0.network_policy_config", "addons_config.0.cloudrun_config", "addons_config.0.gcp_filestore_csi_driver_config", - "addons_config.0.istio_config", "addons_config.0.dns_cache_config", "addons_config.0.gce_persistent_disk_csi_driver_config", + "addons_config.0.istio_config", "addons_config.0.kalm_config", "addons_config.0.config_connector_config", "addons_config.0.gke_backup_agent_config", @@ -288,31 +288,6 @@ func resourceContainerCluster() *schema.Resource { }, }, }, - "istio_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the Istio addon.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disabled": { - Type: schema.TypeBool, - Required: true, - Description: `The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. Set disabled = false to enable.`, - }, - "auth": { - Type: schema.TypeString, - Optional: true, - // We can't use a Terraform-level default because it won't be true when the block is disabled: true - DiffSuppressFunc: emptyOrDefaultStringSuppress("AUTH_NONE"), - ValidateFunc: validation.StringInSlice([]string{"AUTH_NONE", "AUTH_MUTUAL_TLS"}, false), - Description: `The authentication type between services in Istio. 
Available options include AUTH_MUTUAL_TLS.`, - }, - }, - }, - }, "dns_cache_config": { Type: schema.TypeList, Optional: true, @@ -336,7 +311,7 @@ func resourceContainerCluster() *schema.Resource { Computed: true, AtLeastOneOf: addonsConfigKeys, MaxItems: 1, - Description: `Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to disabled; set enabled = true to enable.`, + Description: `Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Defaults to enabled; set disabled = true to disable.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -346,6 +321,31 @@ func resourceContainerCluster() *schema.Resource { }, }, }, + "istio_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Istio addon.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + Description: `The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. Set disabled = false to enable.`, + }, + "auth": { + Type: schema.TypeString, + Optional: true, + // We can't use a Terraform-level default because it won't be true when the block is disabled: true + DiffSuppressFunc: emptyOrDefaultStringSuppress("AUTH_NONE"), + ValidateFunc: validation.StringInSlice([]string{"AUTH_NONE", "AUTH_MUTUAL_TLS"}, false), + Description: `The authentication type between services in Istio. Available options include AUTH_MUTUAL_TLS.`, + }, + }, + }, + }, "kalm_config": { Type: schema.TypeList, Optional: true, @@ -473,6 +473,12 @@ func resourceContainerCluster() *schema.Resource { DiffSuppressFunc: emptyOrDefaultStringSuppress("automatic"), Description: `Minimum CPU platform to be used by this instance. 
The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell.`, }, + "boot_disk_kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, + }, }, }, }, @@ -505,11 +511,38 @@ func resourceContainerCluster() *schema.Resource { }, "enable_binary_authorization": { - Default: false, Type: schema.TypeBool, Optional: true, + Default: false, + Deprecated: "Deprecated in favor of binary_authorization.", Description: `Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binary Authorization.`, - ConflictsWith: []string{"enable_autopilot"}, + ConflictsWith: []string{"enable_autopilot", "binary_authorization"}, + }, + "binary_authorization": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: BinaryAuthorizationDiffSuppress, + MaxItems: 1, + Description: "Configuration options for the Binary Authorization feature.", + ConflictsWith: []string{"enable_binary_authorization"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Deprecated: "Deprecated in favor of evaluation_mode.", + Description: "Enable Binary Authorization for this cluster.", + ConflictsWith: []string{"enable_autopilot", "binary_authorization.0.evaluation_mode"}, + }, + "evaluation_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"}, false), + Description: "Mode of operation for Binary Authorization policy evaluation.", + ConflictsWith: []string{"binary_authorization.0.enabled"}, + }, + }, + }, }, "enable_kubernetes_alpha": { @@ -555,7 +588,6 @@ func resourceContainerCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - 
ForceNew: true, MaxItems: 1, Description: `Configuration for the Google Groups for GKE feature.`, Elem: &schema.Resource{ @@ -563,7 +595,6 @@ func resourceContainerCluster() *schema.Resource { "security_group": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.`, }, }, @@ -723,10 +754,10 @@ func resourceContainerCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS and WORKLOADS.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "WORKLOADS"}, false), + ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), }, }, "managed_prometheus": { @@ -1088,14 +1119,14 @@ func resourceContainerCluster() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, + Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. 
This field only applies to private clusters, when enable_private_nodes is true.`, }, "enable_private_nodes": { Type: schema.TypeBool, Optional: true, ForceNew: true, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.`, + Description: `Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, }, "master_ipv4_cidr_block": { Type: schema.TypeString, @@ -1216,6 +1247,23 @@ func resourceContainerCluster() *schema.Resource { }, }, + "mesh_certificates": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `If set, and enable_certificates=true, the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_certificates": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + }, + }, + }, + }, + "database_encryption": { Type: schema.TypeList, MaxItems: 1, @@ -1503,10 +1551,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er IpAllocationPolicy: ipAllocationBlock, PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), - BinaryAuthorization: &container.BinaryAuthorization{ - Enabled: d.Get("enable_binary_authorization").(bool), - ForceSendFields: []string{"Enabled"}, - }, + BinaryAuthorization: 
expandBinaryAuthorization(d.Get("binary_authorization"), d.Get("enable_binary_authorization").(bool)), Autopilot: &container.Autopilot{ Enabled: d.Get("enable_autopilot").(bool), ForceSendFields: []string{"Enabled"}, @@ -1614,6 +1659,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v) } + if v, ok := d.GetOk("mesh_certificates"); ok { + cluster.MeshCertificates = expandMeshCertificates(v) + } + if v, ok := d.GetOk("database_encryption"); ok { cluster.DatabaseEncryption = expandDatabaseEncryption(v) } @@ -1856,8 +1905,17 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("cluster_autoscaling", flattenClusterAutoscaling(cluster.Autoscaling)); err != nil { return err } - if err := d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled); err != nil { - return fmt.Errorf("Error setting enable_binary_authorization: %s", err) + binauthz_enabled := d.Get("binary_authorization.0.enabled").(bool) + legacy_binauthz_enabled := d.Get("enable_binary_authorization").(bool) + if !binauthz_enabled { + if err := d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled); err != nil { + return fmt.Errorf("Error setting enable_binary_authorization: %s", err) + } + } + if !legacy_binauthz_enabled { + if err := d.Set("binary_authorization", flattenBinaryAuthorization(cluster.BinaryAuthorization)); err != nil { + return err + } } if cluster.Autopilot != nil { if err := d.Set("enable_autopilot", cluster.Autopilot.Enabled); err != nil { @@ -1948,6 +2006,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro return err } + if err := d.Set("mesh_certificates", flattenMeshCertificates(cluster.MeshCertificates)); err != nil { + return err + } + if err := d.Set("database_encryption", 
flattenDatabaseEncryption(cluster.DatabaseEncryption)); err != nil { return err } @@ -2098,6 +2160,22 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), enabled) } + if d.HasChange("binary_authorization") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredBinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization"), d.Get("enable_binary_authorization").(bool)), + }, + } + + updateF := updateFunc(req, "updating GKE binary authorization") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), req.Update.DesiredBinaryAuthorization) + } + if d.HasChange("enable_shielded_nodes") { enabled := d.Get("enable_shielded_nodes").(bool) req := &container.UpdateClusterRequest{ @@ -2255,6 +2333,21 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s L4 ILB Subsetting has been updated to %v", d.Id(), enabled) } + if d.HasChange("authenticator_groups_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredAuthenticatorGroupsConfig: expandContainerClusterAuthenticatorGroupsConfig(d.Get("authenticator_groups_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster authenticator groups config") + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s authenticator groups config has been updated", d.Id()) + } + if d.HasChange("default_snat_status") { req := &container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ @@ -2660,6 +2753,33 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("mesh_certificates") { + c := d.Get("mesh_certificates") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMeshCertificates: expandMeshCertificates(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return containerOperationWait(config, op, project, location, "updating GKE cluster mesh certificates config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s mesh certificates config has been updated", d.Id()) + } + if d.HasChange("database_encryption") { c := d.Get("database_encryption") req := &container.UpdateClusterRequest{ @@ -3115,15 +3235,6 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig { } } - if v, ok := config["istio_config"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.IstioConfig = &container.IstioConfig{ - Disabled: addon["disabled"].(bool), - Auth: addon["auth"].(string), - ForceSendFields: []string{"Disabled"}, - } - } - if v, ok := config["dns_cache_config"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) 
ac.DnsCacheConfig = &container.DnsCacheConfig{ @@ -3140,6 +3251,15 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig { } } + if v, ok := config["istio_config"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + ac.IstioConfig = &container.IstioConfig{ + Disabled: addon["disabled"].(bool), + Auth: addon["auth"].(string), + ForceSendFields: []string{"Disabled"}, + } + } + if v, ok := config["kalm_config"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) ac.KalmConfig = &container.KalmConfig{ @@ -3346,6 +3466,7 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa OauthScopes: convertStringArr(config["oauth_scopes"].([]interface{})), ServiceAccount: config["service_account"].(string), ImageType: config["image_type"].(string), + BootDiskKmsKey: config["boot_disk_kms_key"].(string), } cpu := config["min_cpu_platform"].(string) @@ -3402,6 +3523,21 @@ func expandNotificationConfig(configured interface{}) *container.NotificationCon } } +func expandBinaryAuthorization(configured interface{}, legacy_enabled bool) *container.BinaryAuthorization { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return &container.BinaryAuthorization{ + Enabled: legacy_enabled, + ForceSendFields: []string{"Enabled"}, + } + } + config := l[0].(map[string]interface{}) + return &container.BinaryAuthorization{ + Enabled: config["enabled"].(bool), + EvaluationMode: config["evaluation_mode"].(string), + } +} + func expandConfidentialNodes(configured interface{}) *container.ConfidentialNodes { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -3518,6 +3654,18 @@ func expandVerticalPodAutoscaling(configured interface{}) *container.VerticalPod } } +func expandMeshCertificates(configured interface{}) *container.MeshCertificates { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := 
l[0].(map[string]interface{}) + return &container.MeshCertificates{ + EnableCertificates: config["enable_certificates"].(bool), + ForceSendFields: []string{"EnableCertificates"}, + } +} + func expandDatabaseEncryption(configured interface{}) *container.DatabaseEncryption { l := configured.([]interface{}) if len(l) == 0 { @@ -3695,6 +3843,18 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig return mc } +func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.AuthenticatorGroupsConfig{ + SecurityGroup: config["security_group"].(string), + } +} + func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} { if c == nil { return nil @@ -3712,6 +3872,17 @@ func flattenNotificationConfig(c *container.NotificationConfig) []map[string]int } } +func flattenBinaryAuthorization(c *container.BinaryAuthorization) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + "evaluation_mode": c.EvaluationMode, + }) + } + return result +} + func flattenConfidentialNodes(c *container.ConfidentialNodes) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { @@ -3785,15 +3956,6 @@ func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interfac result["cloudrun_config"] = []map[string]interface{}{cloudRunConfig} } - if c.IstioConfig != nil { - result["istio_config"] = []map[string]interface{}{ - { - "disabled": c.IstioConfig.Disabled, - "auth": c.IstioConfig.Auth, - }, - } - } - if c.DnsCacheConfig != nil { result["dns_cache_config"] = []map[string]interface{}{ { @@ -3810,6 +3972,15 @@ func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interfac } } + if 
c.IstioConfig != nil { + result["istio_config"] = []map[string]interface{}{ + { + "disabled": c.IstioConfig.Disabled, + "auth": c.IstioConfig.Auth, + }, + } + } + if c.KalmConfig != nil { result["kalm_config"] = []map[string]interface{}{ { @@ -4102,6 +4273,7 @@ func flattenAutoProvisioningDefaults(a *container.AutoprovisioningNodePoolDefaul r["service_account"] = a.ServiceAccount r["image_type"] = a.ImageType r["min_cpu_platform"] = a.MinCpuPlatform + r["boot_disk_kms_key"] = a.BootDiskKmsKey return []map[string]interface{}{r} } @@ -4160,6 +4332,17 @@ func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) [] } } +func flattenMeshCertificates(c *container.MeshCertificates) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enable_certificates": c.EnableCertificates, + }, + } +} + func flattenDatabaseEncryption(c *container.DatabaseEncryption) []map[string]interface{} { if c == nil { return nil @@ -4426,3 +4609,15 @@ func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.Res return false } + +func BinaryAuthorizationDiffSuppress(k, old, new string, r *schema.ResourceData) bool { + // An empty config is equivalent to a config with enabled set to false. 
+ if k == "binary_authorization.#" && old == "1" && new == "0" { + o, _ := r.GetChange("binary_authorization.0.enabled") + if !o.(bool) && !r.HasChange("binary_authorization.0.evaluation_mode") { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go index a378619859..24780eb4aa 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataflow_job.go @@ -211,6 +211,7 @@ func resourceDataflowJob() *schema.Resource { "additional_experiments": { Type: schema.TypeSet, Optional: true, + Computed: true, Description: `List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].`, Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset.go new file mode 100644 index 0000000000..6734c76c19 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_asset.go @@ -0,0 +1,859 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +) + +func resourceDataplexAsset() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexAssetCreate, + Read: resourceDataplexAssetRead, + Update: resourceDataplexAssetUpdate, + Delete: resourceDataplexAssetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexAssetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataplex_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The zone for the resource", + }, + + "discovery_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Specification of the discovery feature applied to data referenced by this asset. 
When this spec is left unset, the asset will use the spec set on the parent zone.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The name of the asset.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Specification of the resource that is referenced by this asset.", + MaxItems: 1, + Elem: DataplexAssetResourceSpecSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the asset.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User defined labels for the asset.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was created.", + }, + + "discovery_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the discovery feature applied to data referenced by this asset.", + Elem: DataplexAssetDiscoveryStatusSchema(), + }, + + "resource_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Status of the resource referenced by this asset.", + Elem: DataplexAssetResourceStatusSchema(), + }, + + "security_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the security policy applied to resource referenced by this asset.", + Elem: DataplexAssetSecurityStatusSchema(), + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was last updated.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. 
For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexAssetDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. 
The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexAssetResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}`", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "last_run_duration": { + Type: schema.TypeString, + Computed: true, + Description: "The duration of the last discovery run.", + }, + + "last_run_time": { + Type: schema.TypeString, + Computed: true, + Description: "The start time of the last discovery run.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current status of the discovery feature. 
Possible values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED", + }, + + "stats": { + Type: schema.TypeList, + Computed: true, + Description: "Data Stats of the asset reported by discovery.", + Elem: DataplexAssetDiscoveryStatusStatsSchema(), + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusStatsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_items": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of data items within the referenced resource.", + }, + + "data_size": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of stored data bytes within the referenced resource.", + }, + + "filesets": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of fileset entities within the referenced resource.", + }, + + "tables": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of table entities within the referenced resource.", + }, + }, + } +} + +func DataplexAssetResourceStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the managed resource. 
Possible values: STATE_UNSPECIFIED, READY, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetSecurityStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the security policy applied to the attached resource. Possible values: STATE_UNSPECIFIED, READY, APPLYING, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := 
NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return 
fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetAsset(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexAsset %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("dataplex_zone", res.DataplexZone); err != nil { + return fmt.Errorf("error setting dataplex_zone in state: %s", err) + } + if err = d.Set("discovery_spec", flattenDataplexAssetDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexAssetResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("discovery_status", flattenDataplexAssetDiscoveryStatus(res.DiscoveryStatus)); err != nil { + return fmt.Errorf("error setting discovery_status in state: %s", err) + } + if err = 
d.Set("resource_status", flattenDataplexAssetResourceStatus(res.ResourceStatus)); err != nil { + return fmt.Errorf("error setting resource_status in state: %s", err) + } + if err = d.Set("security_status", flattenDataplexAssetSecurityStatus(res.SecurityStatus)); err != nil { + return fmt.Errorf("error setting security_status in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return 
fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Asset %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := 
client.DeleteAsset(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Asset: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Asset %q", d.Id()) + return nil +} + +func resourceDataplexAssetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexAssetDiscoverySpec(o interface{}) *dataplex.AssetDiscoverySpec { + if o == nil { + return dataplex.EmptyAssetDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexAssetDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: expandStringArray(obj["exclude_patterns"]), + IncludePatterns: expandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexAssetDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.String(obj["schedule"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpec(obj *dataplex.AssetDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexAssetDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": 
obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexAssetDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecCsvOptions(o interface{}) *dataplex.AssetDiscoverySpecCsvOptions { + if o == nil { + return dataplex.EmptyAssetDiscoverySpecCsvOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetDiscoverySpecCsvOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexAssetDiscoverySpecCsvOptions(obj *dataplex.AssetDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecJsonOptions(o interface{}) *dataplex.AssetDiscoverySpecJsonOptions { + if o == nil { + return dataplex.EmptyAssetDiscoverySpecJsonOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetDiscoverySpecJsonOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpecJsonOptions(obj *dataplex.AssetDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + 
"disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetResourceSpec(o interface{}) *dataplex.AssetResourceSpec { + if o == nil { + return dataplex.EmptyAssetResourceSpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyAssetResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.AssetResourceSpec{ + Type: dataplex.AssetResourceSpecTypeEnumRef(obj["type"].(string)), + Name: dcl.String(obj["name"].(string)), + } +} + +func flattenDataplexAssetResourceSpec(obj *dataplex.AssetResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "type": obj.Type, + "name": obj.Name, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatus(obj *dataplex.AssetDiscoveryStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "last_run_duration": obj.LastRunDuration, + "last_run_time": obj.LastRunTime, + "message": obj.Message, + "state": obj.State, + "stats": flattenDataplexAssetDiscoveryStatusStats(obj.Stats), + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatusStats(obj *dataplex.AssetDiscoveryStatusStats) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "data_items": obj.DataItems, + "data_size": obj.DataSize, + "filesets": obj.Filesets, + "tables": obj.Tables, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetResourceStatus(obj *dataplex.AssetResourceStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func 
flattenDataplexAssetSecurityStatus(obj *dataplex.AssetSecurityStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go index 69fbc14617..ec74a868d1 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_lake.go @@ -225,12 +225,12 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -247,7 +247,7 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error } else { client.Config.BasePath = bp } - res, err := client.ApplyLake(context.Background(), obj, createDirective...) + res, err := client.ApplyLake(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone.go new file mode 100644 index 0000000000..b0a1a12e75 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataplex_zone.go @@ -0,0 +1,688 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +) + +func resourceDataplexZone() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexZoneCreate, + Read: resourceDataplexZoneRead, + Update: resourceDataplexZoneUpdate, + Delete: resourceDataplexZoneDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexZoneImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "discovery_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Specification of the discovery feature applied to data in this zone.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The name of the zone.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Immutable. 
Specification of the resources that are referenced by the assets within this zone.", + MaxItems: 1, + Elem: DataplexZoneResourceSpecSchema(), + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. The type of the zone. Possible values: TYPE_UNSPECIFIED, RAW, CURATED", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the zone.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User defined labels for the zone.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "asset_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Aggregated status of the underlying assets of the zone.", + Elem: DataplexZoneAssetStatusSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the zone was created.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the zone. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The time when the zone was last updated.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. 
To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexZoneDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexZoneResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. 
Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION", + }, + }, + } +} + +func DataplexZoneAssetStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of active assets.", + }, + + "security_policy_applying_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, 
billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetZone(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexZone %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("discovery_spec", flattenDataplexZoneDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexZoneResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("type", res.Type); err != nil { + return fmt.Errorf("error setting type in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("asset_status", flattenDataplexZoneAssetStatus(res.AssetStatus)); err != nil { + return fmt.Errorf("error setting asset_status in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", 
err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataplex.Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Zone %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteZone(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Zone: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Zone %q", d.Id()) + return nil +} + +func 
resourceDataplexZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexZoneDiscoverySpec(o interface{}) *dataplex.ZoneDiscoverySpec { + if o == nil { + return dataplex.EmptyZoneDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexZoneDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: expandStringArray(obj["exclude_patterns"]), + IncludePatterns: expandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexZoneDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.StringOrNil(obj["schedule"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpec(obj *dataplex.ZoneDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexZoneDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexZoneDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecCsvOptions(o 
interface{}) *dataplex.ZoneDiscoverySpecCsvOptions { + if o == nil { + return dataplex.EmptyZoneDiscoverySpecCsvOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneDiscoverySpecCsvOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexZoneDiscoverySpecCsvOptions(obj *dataplex.ZoneDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecJsonOptions(o interface{}) *dataplex.ZoneDiscoverySpecJsonOptions { + if o == nil { + return dataplex.EmptyZoneDiscoverySpecJsonOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneDiscoverySpecJsonOptions + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpecJsonOptions(obj *dataplex.ZoneDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneResourceSpec(o interface{}) *dataplex.ZoneResourceSpec { + if o == nil { + return dataplex.EmptyZoneResourceSpec + } + objArr := 
o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return dataplex.EmptyZoneResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &dataplex.ZoneResourceSpec{ + LocationType: dataplex.ZoneResourceSpecLocationTypeEnumRef(obj["location_type"].(string)), + } +} + +func flattenDataplexZoneResourceSpec(obj *dataplex.ZoneResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location_type": obj.LocationType, + } + + return []interface{}{transformed} + +} + +func flattenDataplexZoneAssetStatus(obj *dataplex.ZoneAssetStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "active_assets": obj.ActiveAssets, + "security_policy_applying_assets": obj.SecurityPolicyApplyingAssets, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go index 1066ca1e43..30fd6bf662 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_cluster.go @@ -60,9 +60,9 @@ var ( "cluster_config.0.initialization_action", "cluster_config.0.encryption_config", "cluster_config.0.autoscaling_config", + "cluster_config.0.metastore_config", "cluster_config.0.lifecycle_config", "cluster_config.0.endpoint_config", - "cluster_config.0.metastore_config", } ) @@ -636,6 +636,23 @@ by Dataproc`, }, }, }, + "metastore_config": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, + Description: `Specifies a Metastore configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_metastore_service": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource name of an existing Dataproc Metastore service.`, + }, + }, + }, + }, "lifecycle_config": { Type: schema.TypeList, Optional: true, @@ -698,23 +715,6 @@ by Dataproc`, }, }, }, - "metastore_config": { - Type: schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - MaxItems: 1, - Description: `Specifies a Metastore configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataproc_metastore_service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name of an existing Dataproc Metastore service.`, - }, - }, - }, - }, }, }, }, @@ -889,6 +889,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err } cluster.Config, err = expandClusterConfig(d, config) + if err != nil { return err } @@ -974,6 +975,10 @@ func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.Clus conf.AutoscalingConfig = expandAutoscalingConfig(cfg) } + if cfg, ok := configOptions(d, "cluster_config.0.metastore_config"); ok { + conf.MetastoreConfig = expandMetastoreConfig(cfg) + } + if cfg, ok := configOptions(d, "cluster_config.0.lifecycle_config"); ok { conf.LifecycleConfig = expandLifecycleConfig(cfg) } @@ -982,10 +987,6 @@ func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.Clus conf.EndpointConfig = expandEndpointConfig(cfg) } - if cfg, ok := configOptions(d, "cluster_config.0.metastore_config"); ok { - conf.MetastoreConfig = expandMetastoreConfig(cfg) - } - if cfg, ok := configOptions(d, "cluster_config.0.master_config"); ok { log.Println("[INFO] got master_config") conf.MasterConfig = expandInstanceGroupConfig(cfg) @@ -1433,7 +1434,10 @@ func resourceDataprocClusterRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error setting labels: %s", err) } - cfg, err := flattenClusterConfig(d, cluster.Config) + var cfg []map[string]interface{} + + cfg, 
err = flattenClusterConfig(d, cluster.Config) + if err != nil { return err } @@ -1460,9 +1464,9 @@ func flattenClusterConfig(d *schema.ResourceData, cfg *dataproc.ClusterConfig) ( "autoscaling_config": flattenAutoscalingConfig(d, cfg.AutoscalingConfig), "security_config": flattenSecurityConfig(d, cfg.SecurityConfig), "preemptible_worker_config": flattenPreemptibleInstanceGroupConfig(d, cfg.SecondaryWorkerConfig), + "metastore_config": flattenMetastoreConfig(d, cfg.MetastoreConfig), "lifecycle_config": flattenLifecycleConfig(d, cfg.LifecycleConfig), "endpoint_config": flattenEndpointConfig(d, cfg.EndpointConfig), - "metastore_config": flattenMetastoreConfig(d, cfg.MetastoreConfig), } if len(cfg.InitializationActions) > 0 { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation.go new file mode 100644 index 0000000000..1fa44c1a45 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_federation.go @@ -0,0 +1,511 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceDataprocMetastoreFederation() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocMetastoreFederationCreate, + Read: resourceDataprocMetastoreFederationRead, + Update: resourceDataprocMetastoreFederationUpdate, + Delete: resourceDataprocMetastoreFederationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocMetastoreFederationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_metastores": { + Type: schema.TypeSet, + Required: true, + Description: `A map from BackendMetastore rank to BackendMetastores from which the federation service serves metadata at query time. The map key represents the order in which BackendMetastores should be evaluated to resolve database names at query time and should be greater than or equal to zero. A BackendMetastore with a lower number will be evaluated before a BackendMetastore with a higher number.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rank": { + Type: schema.TypeString, + Required: true, + }, + "metastore_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateEnum([]string{"METASTORE_TYPE_UNSPECIFIED", "DATAPROC_METASTORE"}), + Description: `The type of the backend metastore. 
Possible values: ["METASTORE_TYPE_UNSPECIFIED", "DATAPROC_METASTORE"]`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `The relative resource name of the metastore that is being federated.`, + }, + }, + }, + }, + "federation_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the metastore federation. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), +and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between +3 and 63 characters.`, + }, + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apache Hive metastore version of the federation. All backend metastore versions must be compatible with the federation version.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the metastore federation.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the metastore federation should reside.`, + }, + "endpoint_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the endpoint used to access the metastore federation.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The relative resource name of the metastore federation.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the metastore federation.`, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Additional information about the current state of the metastore federation, if available.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The globally unique resource identifier of the metastore federation.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + 
UseJSONNumber: true, + } +} + +func resourceDataprocMetastoreFederationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandDataprocMetastoreFederationLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + versionProp, err := expandDataprocMetastoreFederationVersion(d.Get("version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version"); !isEmptyValue(reflect.ValueOf(versionProp)) && (ok || !reflect.DeepEqual(v, versionProp)) { + obj["version"] = versionProp + } + backendMetastoresProp, err := expandDataprocMetastoreFederationBackendMetastores(d.Get("backend_metastores"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_metastores"); !isEmptyValue(reflect.ValueOf(backendMetastoresProp)) && (ok || !reflect.DeepEqual(v, backendMetastoresProp)) { + obj["backendMetastores"] = backendMetastoresProp + } + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations?federationId={{federation_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Federation: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + 
return fmt.Errorf("Error creating Federation: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = dataprocMetastoreOperationWaitTime( + config, res, project, "Creating Federation", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Federation: %s", err) + } + + log.Printf("[DEBUG] Finished creating Federation %q: %#v", d.Id(), res) + + return resourceDataprocMetastoreFederationRead(d, meta) +} + +func resourceDataprocMetastoreFederationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DataprocMetastoreFederation %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + + if err := d.Set("name", flattenDataprocMetastoreFederationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("labels", 
flattenDataprocMetastoreFederationLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("endpoint_uri", flattenDataprocMetastoreFederationEndpointUri(res["endpointUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("state", flattenDataprocMetastoreFederationState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("state_message", flattenDataprocMetastoreFederationStateMessage(res["stateMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("uid", flattenDataprocMetastoreFederationUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("version", flattenDataprocMetastoreFederationVersion(res["version"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + if err := d.Set("backend_metastores", flattenDataprocMetastoreFederationBackendMetastores(res["backendMetastores"], d, config)); err != nil { + return fmt.Errorf("Error reading Federation: %s", err) + } + + return nil +} + +func resourceDataprocMetastoreFederationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandDataprocMetastoreFederationLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + 
backendMetastoresProp, err := expandDataprocMetastoreFederationBackendMetastores(d.Get("backend_metastores"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_metastores"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backendMetastoresProp)) { + obj["backendMetastores"] = backendMetastoresProp + } + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Federation %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("backend_metastores") { + updateMask = append(updateMask, "backendMetastores") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Federation %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Federation %q: %#v", d.Id(), res) + } + + err = dataprocMetastoreOperationWaitTime( + config, res, project, "Updating Federation", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDataprocMetastoreFederationRead(d, meta) +} + +func resourceDataprocMetastoreFederationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + 
project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Federation: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Federation %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Federation") + } + + err = dataprocMetastoreOperationWaitTime( + config, res, project, "Deleting Federation", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Federation %q: %#v", d.Id(), res) + return nil +} + +func resourceDataprocMetastoreFederationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/federations/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/federations/{{federation_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataprocMetastoreFederationName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenDataprocMetastoreFederationEndpointUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationStateMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationBackendMetastores(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "rank": k, + "name": flattenDataprocMetastoreFederationBackendMetastoresName(original["name"], d, config), + "metastore_type": flattenDataprocMetastoreFederationBackendMetastoresMetastoreType(original["metastoreType"], d, config), + }) + } + return transformed +} +func flattenDataprocMetastoreFederationBackendMetastoresName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreFederationBackendMetastoresMetastoreType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandDataprocMetastoreFederationLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocMetastoreFederationVersion(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreFederationBackendMetastores(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataprocMetastoreFederationBackendMetastoresName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedMetastoreType, err := expandDataprocMetastoreFederationBackendMetastoresMetastoreType(original["metastore_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetastoreType); val.IsValid() && !isEmptyValue(val) { + transformed["metastoreType"] = transformedMetastoreType + } + + transformedRank, err := expandString(original["rank"], d, config) + if err != nil { + return nil, err + } + m[transformedRank] = transformed + } + return m, nil +} + +func expandDataprocMetastoreFederationBackendMetastoresName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreFederationBackendMetastoresMetastoreType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go index cc77379b3a..80c811a2d4 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go +++ 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_metastore_service.go @@ -50,6 +50,14 @@ func resourceDataprocMetastoreService() *schema.Resource { and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.`, }, + "database_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"MYSQL", "SPANNER", ""}), + Description: `The database type that the Metastore service stores its data. Default value: "MYSQL" Possible values: ["MYSQL", "SPANNER"]`, + Default: "MYSQL", + }, "encryption_config": { Type: schema.TypeList, Optional: true, @@ -81,6 +89,35 @@ Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/c ForceNew: true, Description: `The Hive metastore schema version.`, }, + "auxiliary_versions": { + Type: schema.TypeSet, + Optional: true, + Description: `A mapping of Hive metastore version to the auxiliary version configuration. +When specified, a secondary Hive metastore service is created along with the primary service. +All auxiliary versions must be less than the service's primary version. +The key is the auxiliary service name and it must match the regular expression a-z?. +This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `The Hive metastore version of the auxiliary service. 
It must be less than the primary Hive metastore service's version.`, + }, + "config_overrides": { + Type: schema.TypeMap, + Optional: true, + Description: `A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. +If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, "config_overrides": { Type: schema.TypeMap, Computed: true, @@ -90,6 +127,14 @@ Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/c The mappings override system defaults (some keys cannot be overridden)`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "endpoint_protocol": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"THRIFT", "GRPC", ""}), + Description: `The protocol to use for the metastore service endpoint. If unspecified, defaults to 'THRIFT'. Default value: "THRIFT" Possible values: ["THRIFT", "GRPC"]`, + Default: "THRIFT", + }, "kerberos_config": { Type: schema.TypeList, Optional: true, @@ -140,7 +185,7 @@ The mappings override system defaults (some keys cannot be overridden)`, Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `The location where the autoscaling policy should reside. + Description: `The location where the metastore service should reside. The default value is 'global'.`, Default: "global", }, @@ -148,7 +193,8 @@ The default value is 'global'.`, Type: schema.TypeList, Optional: true, Description: `The one hour maintenance window of the metastore service. -This specifies when the service can be restarted for maintenance purposes in UTC time.`, +This specifies when the service can be restarted for maintenance purposes in UTC time. 
+Maintenance window is not needed for services with the 'SPANNER' database type.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -166,6 +212,31 @@ This specifies when the service can be restarted for maintenance purposes in UTC }, }, }, + "metadata_integration": { + Type: schema.TypeList, + Optional: true, + Description: `The setting that defines how metastore metadata should be integrated with external services and systems.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_catalog_config": { + Type: schema.TypeList, + Required: true, + Description: `The integration config for the Data Catalog service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.`, + }, + }, + }, + }, + }, + }, + }, "network": { Type: schema.TypeString, Computed: true, @@ -181,6 +252,14 @@ This specifies when the service can be restarted for maintenance purposes in UTC Optional: true, Description: `The TCP port at which the metastore service is reached. Default: 9083.`, }, + "release_channel": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"CANARY", "STABLE", ""}), + Description: `The release channel of the service. If unspecified, defaults to 'STABLE'. 
Default value: "STABLE" Possible values: ["CANARY", "STABLE"]`, + Default: "STABLE", + }, "tier": { Type: schema.TypeString, Computed: true, @@ -213,6 +292,11 @@ This specifies when the service can be restarted for maintenance purposes in UTC Computed: true, Description: `Additional information about the current state of the metastore service, if available.`, }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The globally unique resource identifier of the metastore service.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -274,6 +358,24 @@ func resourceDataprocMetastoreServiceCreate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("hive_metastore_config"); !isEmptyValue(reflect.ValueOf(hiveMetastoreConfigProp)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp } + databaseTypeProp, err := expandDataprocMetastoreServiceDatabaseType(d.Get("database_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_type"); !isEmptyValue(reflect.ValueOf(databaseTypeProp)) && (ok || !reflect.DeepEqual(v, databaseTypeProp)) { + obj["databaseType"] = databaseTypeProp + } + releaseChannelProp, err := expandDataprocMetastoreServiceReleaseChannel(d.Get("release_channel"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("release_channel"); !isEmptyValue(reflect.ValueOf(releaseChannelProp)) && (ok || !reflect.DeepEqual(v, releaseChannelProp)) { + obj["releaseChannel"] = releaseChannelProp + } + metadataIntegrationProp, err := expandDataprocMetastoreServiceMetadataIntegration(d.Get("metadata_integration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata_integration"); !isEmptyValue(reflect.ValueOf(metadataIntegrationProp)) && (ok || !reflect.DeepEqual(v, metadataIntegrationProp)) { + obj["metadataIntegration"] = metadataIntegrationProp + } url, err := 
replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services?serviceId={{service_id}}") if err != nil { @@ -391,6 +493,18 @@ func resourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface if err := d.Set("hive_metastore_config", flattenDataprocMetastoreServiceHiveMetastoreConfig(res["hiveMetastoreConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Service: %s", err) } + if err := d.Set("database_type", flattenDataprocMetastoreServiceDatabaseType(res["databaseType"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("release_channel", flattenDataprocMetastoreServiceReleaseChannel(res["releaseChannel"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("uid", flattenDataprocMetastoreServiceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("metadata_integration", flattenDataprocMetastoreServiceMetadataIntegration(res["metadataIntegration"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } return nil } @@ -447,6 +561,12 @@ func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("hive_metastore_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp } + metadataIntegrationProp, err := expandDataprocMetastoreServiceMetadataIntegration(d.Get("metadata_integration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata_integration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataIntegrationProp)) { + obj["metadataIntegration"] = metadataIntegrationProp + } url, err := replaceVars(d, config, 
"{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") if err != nil { @@ -479,6 +599,10 @@ func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interfa if d.HasChange("hive_metastore_config") { updateMask = append(updateMask, "hiveMetastoreConfig") } + + if d.HasChange("metadata_integration") { + updateMask = append(updateMask, "metadataIntegration") + } // updateMask is a URL parameter but not present in the schema, so replaceVars // won't set it url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) @@ -686,14 +810,22 @@ func flattenDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d *schema return nil } transformed := make(map[string]interface{}) + transformed["endpoint_protocol"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(original["endpointProtocol"], d, config) transformed["version"] = flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) transformed["config_overrides"] = flattenDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(original["configOverrides"], d, config) transformed["kerberos_config"] = flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(original["kerberosConfig"], d, config) + transformed["auxiliary_versions"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(original["auxiliaryVersions"], d, config) return []interface{}{transformed} } +func flattenDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -744,6 +876,72 @@ func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigG return v } +func flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "key": k, + "version": flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(original["version"], d, config), + "config_overrides": flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(original["configOverrides"], d, config), + }) + } + return transformed +} +func flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceDatabaseType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceReleaseChannel(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceMetadataIntegration(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["data_catalog_config"] = + flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(original["dataCatalogConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func expandDataprocMetastoreServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -833,6 +1031,13 @@ func expandDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d Terrafor original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) + transformedEndpointProtocol, err := expandDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(original["endpoint_protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpointProtocol); val.IsValid() && !isEmptyValue(val) { + transformed["endpointProtocol"] = transformedEndpointProtocol + } + transformedVersion, err := expandDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) if err != nil { return nil, err @@ -854,9 +1059,20 @@ func expandDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d Terrafor transformed["kerberosConfig"] = transformedKerberosConfig } + transformedAuxiliaryVersions, err := expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(original["auxiliary_versions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuxiliaryVersions); val.IsValid() && !isEmptyValue(val) { + transformed["auxiliaryVersions"] = transformedAuxiliaryVersions + } + return transformed, nil } +func expandDataprocMetastoreServiceHiveMetastoreConfigEndpointProtocol(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -935,3 +1151,100 @@ func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(v func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } + +func expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersions(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedConfigOverrides, err := expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(original["config_overrides"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigOverrides); val.IsValid() && !isEmptyValue(val) { + transformed["configOverrides"] = transformedConfigOverrides + } + + transformedKey, err := expandString(original["key"], d, config) + if err != nil { + return nil, err + } + m[transformedKey] = transformed + } + return m, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func 
expandDataprocMetastoreServiceHiveMetastoreConfigAuxiliaryVersionsConfigOverrides(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocMetastoreServiceDatabaseType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceReleaseChannel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceMetadataIntegration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataCatalogConfig, err := expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(original["data_catalog_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataCatalogConfig); val.IsValid() && !isEmptyValue(val) { + transformed["dataCatalogConfig"] = transformedDataCatalogConfig + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { + transformed["enabled"] = 
transformedEnabled + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceMetadataIntegrationDataCatalogConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go index db36d7badf..4b49861740 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dataproc_workflow_template.go @@ -383,7 +383,7 @@ func DataprocWorkflowTemplateJobsHiveJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -473,7 +473,7 @@ func DataprocWorkflowTemplateJobsPigJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. 
Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -562,7 +562,7 @@ func DataprocWorkflowTemplateJobsPrestoJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -900,7 +900,7 @@ func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema() *schema.Resource { Type: schema.TypeList, Required: true, ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. 
Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\" { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -2121,12 +2121,12 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa Version: dcl.Int64OrNil(int64(d.Get("version").(int))), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -2143,7 +2143,7 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa } else { client.Config.BasePath = bp } - res, err := client.ApplyWorkflowTemplate(context.Background(), obj, createDirective...) + res, err := client.ApplyWorkflowTemplate(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -4169,7 +4169,6 @@ func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigO } return items } - func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(o interface{}) []dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { objs := o.([]interface{}) items := make([]dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(objs)) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go index b33c2e3b79..6cf1ae6a34 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_dns_record_set.go @@ -13,7 +13,7 @@ import ( ) func rrdatasDnsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if k == "rrdatas.#" && new == "0" && old != new { + if k == "rrdatas.#" && (new == "0" || new == "") && old != new { return false } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go index c71d9069a6..89e1862bd3 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor.go @@ -134,7 +134,7 @@ func resourceDocumentAIProcessorCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -152,7 +152,7 @@ func resourceDocumentAIProcessorRead(d *schema.ResourceData, meta interface{}) e return err } - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return err } @@ -210,7 +210,7 @@ func resourceDocumentAIProcessorDelete(d *schema.ResourceData, meta interface{}) } billingProject = project - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return err } @@ -235,13 +235,15 @@ func resourceDocumentAIProcessorDelete(d *schema.ResourceData, meta interface{}) func resourceDocumentAIProcessorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ - "(?P.+)", + "projects/(?P[^/]+)/locations/(?P[^/]+)/processors/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -251,7 +253,10 @@ func resourceDocumentAIProcessorImport(d *schema.ResourceData, meta interface{}) } func flattenDocumentAIProcessorName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + if v == nil { + return v + } + return NameFromSelfLinkStateFunc(v) } func flattenDocumentAIProcessorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { diff --git 
a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go index bcf59c8bac..e0ae811ff4 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_document_ai_processor_default_version.go @@ -18,6 +18,7 @@ import ( "fmt" "log" "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -46,10 +47,11 @@ func resourceDocumentAIProcessorDefaultVersion() *schema.Resource { Description: `The processor to set the version on.`, }, "version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The version to set`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: projectNumberDiffSuppress, + Description: `The version to set`, }, }, UseJSONNumber: true, @@ -90,6 +92,11 @@ func resourceDocumentAIProcessorDefaultVersionCreate(d *schema.ResourceData, met billingProject = bp } + if strings.Contains(url, "https://-") { + location := GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url + } res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) if err != nil { return fmt.Errorf("Error creating ProcessorDefaultVersion: %s", err) @@ -126,6 +133,11 @@ func resourceDocumentAIProcessorDefaultVersionRead(d *schema.ResourceData, meta billingProject = bp } + if strings.Contains(url, "https://-") { + location := GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url + } res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) if err != nil { return 
handleNotFoundError(err, d, fmt.Sprintf("DocumentAIProcessorDefaultVersion %q", d.Id())) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go index 5ae3025709..78ee4d1af3 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_eventarc_trigger.go @@ -311,12 +311,12 @@ func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err Transport: expandEventarcTriggerTransport(d.Get("transport")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -333,7 +333,7 @@ func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err } else { client.Config.BasePath = bp } - res, err := client.ApplyTrigger(context.Background(), obj, createDirective...) + res, err := client.ApplyTrigger(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go index 23cbaf5bcb..b085c66a1f 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_filestore_instance.go @@ -152,9 +152,10 @@ IP addresses assigned. Possible values: ["ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4" }, }, "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, Description: `The name of the GCE VPC network to which the instance is connected.`, }, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go index e0c943ca01..5f66cc19ed 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_release.go @@ -49,14 +49,14 @@ func resourceFirebaserulesRelease() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "Format: `projects/{project_id}/releases/{release_id}`", + Description: "Format: `projects/{project_id}/releases/{release_id}`\\Firestore Rules Releases will **always** have the name 'cloud.firestore'", }, "ruleset_name": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Name of the `Ruleset` referred to by this `Release`. 
The `Ruleset` must exist the `Release` to be created.", + Description: "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created.", }, "project": { @@ -102,12 +102,12 @@ func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVars(d, config, "projects/{{project}}/releases/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -124,7 +124,7 @@ func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyRelease(context.Background(), obj, createDirective...) + res, err := client.ApplyRelease(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go index 855b9fbe8d..e849f86eb8 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_firebaserules_ruleset.go @@ -156,12 +156,12 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := 
generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -178,7 +178,7 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} } else { client.Config.BasePath = bp } - res, err := client.ApplyRuleset(context.Background(), obj, createDirective...) + res, err := client.ApplyRuleset(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -191,10 +191,11 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") + // ID has a server-generated value, set again after creation. + + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go index d8c02e4de5..ca75ab9813 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature.go @@ -221,12 +221,12 @@ func resourceGkeHubFeatureCreate(d *schema.ResourceData, meta interface{}) error mutexKV.Lock(lockName) defer mutexKV.Unlock(lockName) - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/features/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := 
generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -243,7 +243,7 @@ func resourceGkeHubFeatureCreate(d *schema.ResourceData, meta interface{}) error } else { client.Config.BasePath = bp } - res, err := client.ApplyFeature(context.Background(), obj, createDirective...) + res, err := client.ApplyFeature(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go index 25f01d2eea..47a21b504d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_feature_membership.go @@ -197,7 +197,7 @@ func GkeHubFeatureMembershipConfigmanagementConfigSyncGitSchema() *schema.Resour "secret_type": { Type: schema.TypeString, Optional: true, - Description: "Type of secret configured for access to the Git repo.", + Description: "Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount or none. The validation of this is case-sensitive.", }, "sync_branch": { @@ -279,6 +279,21 @@ func GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema() *schema.Res Description: "Logs all denies and dry run failures.", }, + "monitoring": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. 
Default: [\"cloudmonitoring\", \"prometheus\"]", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema(), + }, + + "mutation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable or disable mutation in policy controller. If true, mutation CRDs, webhook and controller deployment will be deployed to the cluster.", + }, + "referential_rules_enabled": { Type: schema.TypeBool, Optional: true, @@ -294,6 +309,20 @@ func GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema() *schema.Res } } +func GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backends": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: " Specifies the list of backends Policy Controller will export to. Specifying an empty value `[]` disables metrics export.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) project, err := getProject(d, config) @@ -320,7 +349,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -337,7 +366,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac } else { client.Config.BasePath = bp } - res, err := client.ApplyFeatureMembership(context.Background(), obj, createDirective...) + res, err := client.ApplyFeatureMembership(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -702,6 +731,8 @@ func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{} Enabled: dcl.Bool(obj["enabled"].(bool)), ExemptableNamespaces: expandStringArray(obj["exemptable_namespaces"]), LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), + Monitoring: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj["monitoring"]), + MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), ReferentialRulesEnabled: dcl.Bool(obj["referential_rules_enabled"].(bool)), TemplateLibraryInstalled: dcl.Bool(obj["template_library_installed"].(bool)), } @@ -716,6 +747,8 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub. "enabled": obj.Enabled, "exemptable_namespaces": obj.ExemptableNamespaces, "log_denies_enabled": obj.LogDeniesEnabled, + "monitoring": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj.Monitoring), + "mutation_enabled": obj.MutationEnabled, "referential_rules_enabled": obj.ReferentialRulesEnabled, "template_library_installed": obj.TemplateLibraryInstalled, } @@ -723,3 +756,48 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub. 
return []interface{}{transformed} } + +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o interface{}) *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring{ + Backends: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj["backends"]), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "backends": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj.Backends), + } + + return []interface{}{transformed} + +} +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + objs := o.([]interface{}) + items := make([]gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(objs)) + for _, item := range objs { + i := gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go index 3ba8c7e618..8af1158eec 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_gke_hub_membership.go @@ -70,6 +70,7 @@ https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, "issuer": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, }, @@ -195,7 +196,7 @@ func resourceGKEHubMembershipCreate(d *schema.ResourceData, meta interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -218,7 +219,7 @@ func resourceGKEHubMembershipCreate(d *schema.ResourceData, meta interface{}) er } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{name}}") + id, err = replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -236,7 +237,7 @@ func resourceGKEHubMembershipRead(d *schema.ResourceData, meta interface{}) erro return err } - url, err := replaceVars(d, config, "{{GKEHubBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return err } @@ -382,7 +383,7 @@ func resourceGKEHubMembershipDelete(d *schema.ResourceData, meta interface{}) er } billingProject = project - url, err := replaceVars(d, config, "{{GKEHubBasePath}}{{name}}") + url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return err } @@ -415,13 +416,15 @@ func resourceGKEHubMembershipDelete(d *schema.ResourceData, meta interface{}) er func resourceGKEHubMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ - "(?P.+)", + "projects/(?P[^/]+)/locations/global/memberships/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go index 4421d49882..77187e6fcf 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go +++ 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_kms_crypto_key.go @@ -92,10 +92,10 @@ If not specified at creation time, the default duration is 24 hours.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", ""}), + ValidateFunc: validateEnum([]string{"ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC", ""}), Description: `The immutable purpose of this CryptoKey. See the [purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) -for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"]`, +for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC"]`, Default: "ENCRYPT_DECRYPT", }, "rotation_period": { @@ -132,7 +132,7 @@ See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL". Defaults to "SOFTWARE".`, + Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". 
Defaults to "SOFTWARE".`, Default: "SOFTWARE", }, }, @@ -430,6 +430,12 @@ func resourceKMSCryptoKeyImport(d *schema.ResourceData, meta interface{}) ([]*sc return nil, fmt.Errorf("Error setting skip_initial_version_creation: %s", err) } + id, err := replaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return []*schema.ResourceData{d}, nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go index f3685dbc43..da9a118387 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_logging_log_view.go @@ -121,7 +121,7 @@ func resourceLoggingLogViewCreate(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -138,7 +138,7 @@ func resourceLoggingLogViewCreate(d *schema.ResourceData, meta interface{}) erro } else { client.Config.BasePath = bp } - res, err := client.ApplyLogView(context.Background(), obj, createDirective...) + res, err := client.ApplyLogView(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go index de7e0a2f51..1b91980d94 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_manager_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go index 8c0a8d4eff..e7540f37b8 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_alert_policy.go @@ -302,6 +302,14 @@ alerted on quickly.`, Required: true, Description: `Monitoring Query Language query that outputs a boolean stream.`, }, + "evaluation_missing_data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), + Description: `A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, + }, "trigger": { Type: schema.TypeList, Optional: true, @@ -627,6 +635,14 @@ contain restrictions on resource type, resource labels, and metric labels. 
This field may not exceed 2048 Unicode characters in length.`, + }, + "evaluation_missing_data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), + Description: `A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, }, "filter": { Type: schema.TypeString, @@ -1395,6 +1411,8 @@ func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v in flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) transformed["trigger"] = flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) + transformed["evaluation_missing_data"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluationMissingData"], d, config) return []interface{}{transformed} } func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1441,6 +1459,10 @@ func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigg return v // let terraform core handle it otherwise } +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -1466,6 +1488,8 @@ func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d * flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, 
config) transformed["filter"] = flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) + transformed["evaluation_missing_data"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluationMissingData"], d, config) return []interface{}{transformed} } func flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1598,6 +1622,10 @@ func flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{ return v } +func flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenMonitoringAlertPolicyConditionsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1936,6 +1964,13 @@ func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v int transformed["trigger"] = transformedTrigger } + transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluation_missing_data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !isEmptyValue(val) { + transformed["evaluationMissingData"] = transformedEvaluationMissingData + } + return transformed, nil } @@ -1981,6 +2016,10 @@ func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigge return v, nil } +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -2046,6 
+2085,13 @@ func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d Te transformed["filter"] = transformedFilter } + transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluation_missing_data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !isEmptyValue(val) { + transformed["evaluationMissingData"] = transformedEvaluationMissingData + } + return transformed, nil } @@ -2221,6 +2267,10 @@ func expandMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{} return v, nil } +func expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandMonitoringAlertPolicyConditionsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go index 7a271baf46..2197c9a31e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_monitored_project.go @@ -74,12 +74,12 @@ func resourceMonitoringMonitoredProjectCreate(d *schema.ResourceData, meta inter Name: dcl.String(d.Get("name").(string)), } - id, err := replaceVarsForId(d, config, "locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, 
config.userAgent) if err != nil { return err @@ -97,7 +97,7 @@ func resourceMonitoringMonitoredProjectCreate(d *schema.ResourceData, meta inter } else { client.Config.BasePath = bp } - res, err := client.ApplyMonitoredProject(context.Background(), obj, createDirective...) + res, err := client.ApplyMonitoredProject(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go index 19e3ad7c9c..91327d169b 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_monitoring_uptime_check_config.go @@ -114,6 +114,26 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "accepted_response_status_codes": { + Type: schema.TypeList, + Optional: true, + Description: `If present, the check will only pass if the HTTP response status code is in this set of status codes. If empty, the HTTP status code will only pass if the HTTP status code is 200-299.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY", ""}), + Description: `A class of status codes to accept. 
Possible values: ["STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY"]`, + }, + "status_value": { + Type: schema.TypeInt, + Optional: true, + Description: `A status code to accept.`, + }, + }, + }, + }, "auth_info": { Type: schema.TypeList, Optional: true, @@ -790,6 +810,8 @@ func flattenMonitoringUptimeCheckConfigHttpCheck(v interface{}, d *schema.Resour flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["maskHeaders"], d, config) transformed["body"] = flattenMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) + transformed["accepted_response_status_codes"] = + flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["acceptedResponseStatusCodes"], d, config) return []interface{}{transformed} } func flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -864,6 +886,46 @@ func flattenMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d *schema.Re return v } +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "status_value": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["statusValue"], d, config), + "status_class": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["statusClass"], d, config), + }) + } + return transformed +} +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenMonitoringUptimeCheckConfigTcpCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -1118,6 +1180,13 @@ func expandMonitoringUptimeCheckConfigHttpCheck(v interface{}, d TerraformResour transformed["body"] = transformedBody } + transformedAcceptedResponseStatusCodes, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["accepted_response_status_codes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceptedResponseStatusCodes); val.IsValid() && !isEmptyValue(val) { + transformed["acceptedResponseStatusCodes"] = transformedAcceptedResponseStatusCodes + } + return transformed, nil } @@ -1198,6 +1267,43 @@ func expandMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d TerraformRe return v, nil } +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStatusValue, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["status_value"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedStatusValue); val.IsValid() && !isEmptyValue(val) { + transformed["statusValue"] = transformedStatusValue + } + + transformedStatusClass, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["status_class"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStatusClass); val.IsValid() && !isEmptyValue(val) { + transformed["statusClass"] = transformedStatusClass + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandMonitoringUptimeCheckConfigTcpCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go index 8f26b0e59c..62d2c02bde 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_hub.go @@ -134,12 +134,12 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective 
userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -156,7 +156,7 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface } else { client.Config.BasePath = bp } - res, err := client.ApplyHub(context.Background(), obj, createDirective...) + res, err := client.ApplyHub(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go index 83de162588..fff5485a32 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_network_connectivity_spoke.go @@ -249,12 +249,12 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -271,7 +271,7 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa } else { client.Config.BasePath = bp } - res, err := client.ApplySpoke(context.Background(), obj, createDirective...) + res, err := client.ApplySpoke(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go index 4f24e3965e..5498db3c0f 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_instance.go @@ -117,8 +117,8 @@ If not specified, this defaults to 100.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", ""}), - Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED"]`, + ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), + Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, }, "container_image": { Type: schema.TypeList, @@ -165,9 +165,9 @@ If not specified, this defaults to 100.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", ""}), + ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), DiffSuppressFunc: emptyOrDefaultStringSuppress("DISK_TYPE_UNSPECIFIED"), - Description: `Possible disk types for notebook instances. Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED"]`, + Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, }, "disk_encryption": { Type: schema.TypeString, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go index e5fb4651ae..43115bc4f2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_notebooks_runtime.go @@ -75,7 +75,7 @@ func resourceNotebooksRuntime() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: `The name specified for the Notebook instance.`, + Description: `The name specified for the Notebook runtime.`, }, "access_config": { Type: schema.TypeList, @@ -143,6 +143,26 @@ Default: 180 minutes`, Optional: true, Description: `Install Nvidia Driver automatically.`, }, + "kernels": { + Type: schema.TypeList, + Optional: true, + Description: `Use a list of container images to use as Kernels in the notebook instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository": { + Type: schema.TypeString, + Required: true, + Description: `The path to the container image repository. +For example: gcr.io/{project_id}/{imageName}`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `The tag of the container image. If not specified, this defaults to the latest tag.`, + }, + }, + }, + }, "notebook_upgrade_schedule": { Type: schema.TypeString, Optional: true, @@ -156,6 +176,17 @@ Please follow the [cron format](https://en.wikipedia.org/wiki/Cron).`, fully boots up. 
The path must be a URL or Cloud Storage path (gs://path-to-file/file-name).`, }, + "post_startup_script_behavior": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START", ""}), + Description: `Behavior for the post startup script. Possible values: ["POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START"]`, + }, + "upgradeable": { + Type: schema.TypeBool, + Computed: true, + Description: `Bool indicating whether an newer image is available in an image family.`, + }, }, }, }, @@ -357,6 +388,7 @@ rest/v1/projects.locations.runtimes#AcceleratorType'`, Type: schema.TypeList, Computed: true, Optional: true, + ForceNew: true, Description: `Use a list of container images to start the notebook instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -377,6 +409,7 @@ For example: gcr.io/{project_id}/{imageName}`, "encryption_config": { Type: schema.TypeList, Optional: true, + ForceNew: true, Description: `Encryption settings for virtual machine data disk.`, MaxItems: 1, Elem: &schema.Resource{ @@ -396,6 +429,7 @@ It has the following format: "internal_ip_only": { Type: schema.TypeBool, Optional: true, + ForceNew: true, Description: `If true, runtime will only have internal IP addresses. By default, runtimes are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each vm. This @@ -429,6 +463,7 @@ _metadata)).`, "network": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork. 
If neither 'network' nor 'subnet' is specified, the "default" network of the project is @@ -447,13 +482,22 @@ Runtimes support the following network configurations: "nic_type": { Type: schema.TypeString, Optional: true, + ForceNew: true, ValidateFunc: validateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), Description: `The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. Possible values: ["UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC"]`, + }, + "reserved_ip_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Reserved IP Range name is used for VPC Peering. The +subnetwork allocation will use the range *name* if it's assigned.`, }, "shielded_instance_config": { Type: schema.TypeList, Optional: true, + ForceNew: true, Description: `Shielded VM Instance configuration settings.`, MaxItems: 1, Elem: &schema.Resource{ @@ -489,6 +533,7 @@ default.`, "subnet": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network. A full URL or partial URI are valid. 
Examples: @@ -756,13 +801,35 @@ func resourceNotebooksRuntimeUpdate(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] Updating Runtime %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("virtual_machine") { + updateMask = append(updateMask, "virtualMachine") + } + + if d.HasChange("access_config") { + updateMask = append(updateMask, "accessConfig") + } + + if d.HasChange("software_config") { + updateMask = append(updateMask, "softwareConfig.idleShutdown", + "softwareConfig.idleShutdownTimeout", + "softwareConfig.customGpuDriverPath", + "softwareConfig.postStartupScript") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) if err != nil { return fmt.Errorf("Error updating Runtime %q: %s", d.Id(), err) @@ -910,6 +977,8 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(original["labels"], d, config) transformed["nic_type"] = flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(original["nicType"], d, config) + transformed["reserved_ip_range"] = + flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(original["reservedIpRange"], d, config) return []interface{}{transformed} } func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1207,6 +1276,10 @@ func 
flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interfac return v } +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenNotebooksRuntimeState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1263,10 +1336,16 @@ func flattenNotebooksRuntimeSoftwareConfig(v interface{}, d *schema.ResourceData flattenNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(original["idleShutdownTimeout"], d, config) transformed["install_gpu_driver"] = flattenNotebooksRuntimeSoftwareConfigInstallGpuDriver(original["installGpuDriver"], d, config) + transformed["upgradeable"] = + flattenNotebooksRuntimeSoftwareConfigUpgradeable(original["upgradeable"], d, config) transformed["custom_gpu_driver_path"] = flattenNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(original["customGpuDriverPath"], d, config) transformed["post_startup_script"] = flattenNotebooksRuntimeSoftwareConfigPostStartupScript(original["postStartupScript"], d, config) + transformed["post_startup_script_behavior"] = + flattenNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(original["postStartupScriptBehavior"], d, config) + transformed["kernels"] = + flattenNotebooksRuntimeSoftwareConfigKernels(original["kernels"], d, config) return []interface{}{transformed} } func flattenNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { @@ -1302,6 +1381,10 @@ func flattenNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d *sch return v } +func flattenNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1310,6 +1393,37 @@ func 
flattenNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d *sc return v } +func flattenNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenNotebooksRuntimeSoftwareConfigKernels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "repository": flattenNotebooksRuntimeSoftwareConfigKernelsRepository(original["repository"], d, config), + "tag": flattenNotebooksRuntimeSoftwareConfigKernelsTag(original["tag"], d, config), + }) + } + return transformed +} +func flattenNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenNotebooksRuntimeMetrics(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -1482,6 +1596,13 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d T transformed["nicType"] = transformedNicType } + transformedReservedIpRange, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(original["reserved_ip_range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !isEmptyValue(val) { + transformed["reservedIpRange"] = transformedReservedIpRange + } + return transformed, nil } @@ -1899,6 +2020,10 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interface return v, nil } +func 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandNotebooksRuntimeAccessConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -1988,6 +2113,13 @@ func expandNotebooksRuntimeSoftwareConfig(v interface{}, d TerraformResourceData transformed["installGpuDriver"] = transformedInstallGpuDriver } + transformedUpgradeable, err := expandNotebooksRuntimeSoftwareConfigUpgradeable(original["upgradeable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpgradeable); val.IsValid() && !isEmptyValue(val) { + transformed["upgradeable"] = transformedUpgradeable + } + transformedCustomGpuDriverPath, err := expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(original["custom_gpu_driver_path"], d, config) if err != nil { return nil, err @@ -2002,6 +2134,20 @@ func expandNotebooksRuntimeSoftwareConfig(v interface{}, d TerraformResourceData transformed["postStartupScript"] = transformedPostStartupScript } + transformedPostStartupScriptBehavior, err := expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(original["post_startup_script_behavior"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostStartupScriptBehavior); val.IsValid() && !isEmptyValue(val) { + transformed["postStartupScriptBehavior"] = transformedPostStartupScriptBehavior + } + + transformedKernels, err := expandNotebooksRuntimeSoftwareConfigKernels(original["kernels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKernels); val.IsValid() && !isEmptyValue(val) { + transformed["kernels"] = transformedKernels + } + return transformed, nil } @@ -2025,6 +2171,10 @@ func expandNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d Terra return v, nil } 
+func expandNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -2032,3 +2182,44 @@ func expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d Te func expandNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } + +func expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksRuntimeSoftwareConfigKernels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepository, err := expandNotebooksRuntimeSoftwareConfigKernelsRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedTag, err := expandNotebooksRuntimeSoftwareConfigKernelsTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + transformed["tag"] = transformedTag + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { + return v, nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go index b6c6b722f9..7ce8dbfcaa 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_org_policy_policy.go @@ -212,7 +212,7 @@ func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -229,7 +229,7 @@ func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) err } else { client.Config.BasePath = bp } - res, err := client.ApplyPolicy(context.Background(), obj, createDirective...) + res, err := client.ApplyPolicy(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go index cc653d6e21..760c02191c 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_os_config_os_policy_assignment.go @@ -1326,12 +1326,12 @@ func resourceOsConfigOsPolicyAssignmentCreate(d *schema.ResourceData, meta inter Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -1349,7 +1349,7 @@ func resourceOsConfigOsPolicyAssignmentCreate(d *schema.ResourceData, meta inter } else { client.Config.BasePath = bp } - res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, createDirective...) + res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go index 13f391704b..6155a88bdc 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate.go @@ -73,10 +73,13 @@ omitted, no template will be used. This template must be in the same location as the Certificate.`, }, "certificate_authority": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Certificate Authority name.`, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Certificate Authority ID that should issue the certificate. 
For example, to issue a Certificate from +a Certificate Authority with resource name 'projects/my-project/locations/us-central1/caPools/my-pool/certificateAuthorities/my-ca', +argument 'pool' should be set to 'projects/my-project/locations/us-central1/caPools/my-pool', argument 'certificate_authority' +should be set to 'my-ca'.`, }, "config": { Type: schema.TypeList, @@ -1129,7 +1132,7 @@ This is in RFC3339 text format.`, "issuer_certificate_authority": { Type: schema.TypeString, Computed: true, - Description: `The resource name of the issuing CertificateAuthority in the format projects/*/locations/*/caPools/*/certificateAuthorities/*.`, + Description: `The resource name of the issuing CertificateAuthority in the format 'projects/*/locations/*/caPools/*/certificateAuthorities/*'.`, }, "pem_certificate": { Type: schema.TypeString, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go index c4cd85e718..1cbef84c57 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_authority.go @@ -550,6 +550,52 @@ An object containing a list of "key": value pairs. Example: { "name": "wrench", fractional digits, terminated by 's'. Example: "3.5s".`, Default: "315360000s", }, + "pem_ca_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `The signed CA certificate issued from the subordinated CA's CSR. 
This is needed when activating the subordiante CA with a third party issuer.`, + }, + "subordinate_config": { + Type: schema.TypeList, + Optional: true, + Description: `If this is a subordinate CertificateAuthority, this field will be set +with the subordinate configuration, which describes its issuers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareResourceNames, + Description: `This can refer to a CertificateAuthority that was used to create a +subordinate CertificateAuthority. This field is used for information +and usability purposes only. The resource name is in the format +'projects/*/locations/*/caPools/*/certificateAuthorities/*'.`, + ExactlyOneOf: []string{"subordinate_config.0.certificate_authority", "subordinate_config.0.pem_issuer_chain"}, + }, + "pem_issuer_chain": { + Type: schema.TypeList, + Optional: true, + Description: `Contains the PEM certificate chain for the issuers of this CertificateAuthority, +but not pem certificate for this CA itself.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pem_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `Expected to be in leaf-to-root order according to RFC 5246.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"subordinate_config.0.certificate_authority", "subordinate_config.0.pem_issuer_chain"}, + }, + }, + }, + }, "type": { Type: schema.TypeString, Optional: true, @@ -558,8 +604,7 @@ fractional digits, terminated by 's'. Example: "3.5s".`, Description: `The Type of this CertificateAuthority. ~> **Note:** For 'SUBORDINATE' Certificate Authorities, they need to -be manually activated (via Cloud Console of 'gcloud') before they can -issue certificates. 
Default value: "SELF_SIGNED" Possible values: ["SELF_SIGNED", "SUBORDINATE"]`, +be activated before they can issue certificates. Default value: "SELF_SIGNED" Possible values: ["SELF_SIGNED", "SUBORDINATE"]`, Default: "SELF_SIGNED", }, "access_urls": { @@ -676,6 +721,12 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in } else if v, ok := d.GetOkExists("key_spec"); !isEmptyValue(reflect.ValueOf(keySpecProp)) && (ok || !reflect.DeepEqual(v, keySpecProp)) { obj["keySpec"] = keySpecProp } + subordinateConfigProp, err := expandPrivatecaCertificateAuthoritySubordinateConfig(d.Get("subordinate_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subordinate_config"); !isEmptyValue(reflect.ValueOf(subordinateConfigProp)) && (ok || !reflect.DeepEqual(v, subordinateConfigProp)) { + obj["subordinateConfig"] = subordinateConfigProp + } gcsBucketProp, err := expandPrivatecaCertificateAuthorityGcsBucket(d.Get("gcs_bucket"), d, config) if err != nil { return err @@ -708,6 +759,9 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in billingProject = bp } + // Drop `subordinateConfig` as it can not be set during CA creation. + // It can be used to activate CA during post_create or pre_update. 
+ delete(obj, "subordinateConfig") res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) if err != nil { return fmt.Errorf("Error creating CertificateAuthority: %s", err) @@ -753,27 +807,24 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in staged := d.Get("type").(string) == "SELF_SIGNED" + if d.Get("type").(string) == "SUBORDINATE" { + if _, ok := d.GetOk("subordinate_config"); ok { + // First party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with first party issuer") + if err := activateSubCAWithFirstPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with first party issuer: %v", err) + } + staged = true + log.Printf("[DEBUG] CertificateAuthority activated") + } + } + // Enable the CA if `desired_state` is unspecified or specified as `ENABLED`. if p, ok := d.GetOk("desired_state"); !ok || p.(string) == "ENABLED" { // Skip enablement on SUBORDINATE CA for backward compatible. 
if staged { - url, err = replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Enabling CertificateAuthority: %#v", obj) - - res, err = sendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return fmt.Errorf("Error enabling CertificateAuthority: %s", err) - } - - err = privatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err) + if err := enableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %v", err) } } } @@ -850,6 +901,9 @@ func resourcePrivatecaCertificateAuthorityRead(d *schema.ResourceData, meta inte if err := d.Set("key_spec", flattenPrivatecaCertificateAuthorityKeySpec(res["keySpec"], d, config)); err != nil { return fmt.Errorf("Error reading CertificateAuthority: %s", err) } + if err := d.Set("subordinate_config", flattenPrivatecaCertificateAuthoritySubordinateConfig(res["subordinateConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateAuthority: %s", err) + } if err := d.Set("state", flattenPrivatecaCertificateAuthorityState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading CertificateAuthority: %s", err) } @@ -891,6 +945,12 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in billingProject = project obj := make(map[string]interface{}) + subordinateConfigProp, err := expandPrivatecaCertificateAuthoritySubordinateConfig(d.Get("subordinate_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subordinate_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
subordinateConfigProp)) { + obj["subordinateConfig"] = subordinateConfigProp + } labelsProp, err := expandPrivatecaCertificateAuthorityLabels(d.Get("labels"), d, config) if err != nil { return err @@ -906,6 +966,10 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in log.Printf("[DEBUG] Updating CertificateAuthority %q: %#v", d.Id(), obj) updateMask := []string{} + if d.HasChange("subordinate_config") { + updateMask = append(updateMask, "subordinateConfig") + } + if d.HasChange("labels") { updateMask = append(updateMask, "labels") } @@ -915,50 +979,43 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in if err != nil { return err } + if d.HasChange("subordinate_config") { + if d.Get("type").(string) != "SUBORDINATE" { + return fmt.Errorf("`subordinate_config` can only be configured on subordinate CA") + } + + // Activate subordinate CA in `AWAITING_USER_ACTIVATION` state. + if d.Get("state") == "AWAITING_USER_ACTIVATION" { + if _, ok := d.GetOk("pem_ca_certificate"); ok { + // Third party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with third party issuer") + if err := activateSubCAWithThirdPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with third party issuer: %v", err) + } + } else { + // First party issuer + log.Printf("[DEBUG] Activating CertificateAuthority with first party issuer") + if err := activateSubCAWithFirstPartyIssuer(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error activating subordinate CA with first party issuer: %v", err) + } + } + log.Printf("[DEBUG] CertificateAuthority activated") + } + } + + log.Printf("[DEBUG] checking desired_state") if d.HasChange("desired_state") { // Currently, most CA state update operations are not idempotent. // Try to change state only if the current `state` does not match the `desired_state`. 
if p, ok := d.GetOk("desired_state"); ok && p.(string) != d.Get("state").(string) { switch p.(string) { case "ENABLED": - enableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Enabling CA: %#v", obj) - - res, err := sendRequest(config, "POST", billingProject, enableUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("Error enabling CA: %s", err) - } - - var opRes map[string]interface{} - err = privatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Enabling CA", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to enable CA: %s", err) + if err := enableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %v", err) } case "DISABLED": - disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Disabling CA: %#v", obj) - - dRes, err := sendRequest(config, "POST", billingProject, disableUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("Error disabling CA: %s", err) - } - - var opRes map[string]interface{} - err = privatecaOperationWaitTimeWithResponse( - config, dRes, &opRes, project, "Disabling CA", userAgent, - d.Timeout(schema.TimeoutDelete)) - if err != nil { - return fmt.Errorf("Error waiting to disable CA: %s", err) + if err := disableCA(config, d, project, billingProject, userAgent); err != nil { + return fmt.Errorf("Error disabling CertificateAuthority: %v", err) } default: return fmt.Errorf("Unsupported value in field `desired_state`") @@ -1266,6 +1323,42 @@ func flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d *sche 
return v } +func flattenPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["certificate_authority"] = + flattenPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(original["certificateAuthority"], d, config) + transformed["pem_issuer_chain"] = + flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(original["pemIssuerChain"], d, config) + return []interface{}{transformed} +} +func flattenPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pem_certificates"] = + flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(original["pemCertificates"], d, config) + return []interface{}{transformed} +} +func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenPrivatecaCertificateAuthorityState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -1611,6 +1704,59 @@ func expandPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d Terraf return v, nil } +func expandPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertificateAuthority, err := expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(original["certificate_authority"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateAuthority); val.IsValid() && !isEmptyValue(val) { + transformed["certificateAuthority"] = transformedCertificateAuthority + } + + transformedPemIssuerChain, err := expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(original["pem_issuer_chain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemIssuerChain); val.IsValid() && !isEmptyValue(val) { + transformed["pemIssuerChain"] = transformedPemIssuerChain + } + + return transformed, nil +} + +func expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPemCertificates, err := expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(original["pem_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemCertificates); val.IsValid() && !isEmptyValue(val) { + transformed["pemCertificates"] = transformedPemCertificates + } + + return transformed, nil +} + +func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandPrivatecaCertificateAuthorityGcsBucket(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go index e745d202f2..d25f08062d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_privateca_certificate_template.go @@ -484,12 +484,12 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -506,7 +506,7 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int } else { client.Config.BasePath = bp } - res, err := client.ApplyCertificateTemplate(context.Background(), obj, createDirective...) + res, err := client.ApplyCertificateTemplate(context.Background(), obj, directive...) 
if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -1227,7 +1227,6 @@ func flattenPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArra } return items } - func expandPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(o interface{}) []privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum { objs := o.([]interface{}) items := make([]privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum, 0, len(objs)) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go index 23e46b1438..6645fa096e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_project_service_identity.go @@ -34,6 +34,7 @@ func resourceProjectServiceIdentity() *schema.Resource { }, "email": { Type: schema.TypeString, + Optional: true, Computed: true, }, }, @@ -86,16 +87,16 @@ func resourceProjectServiceIdentityCreate(d *schema.ResourceData, meta interface } d.SetId(id) - emailVal, ok := opRes["email"] - if !ok { - return fmt.Errorf("response %v missing 'email'", opRes) - } - email, ok := emailVal.(string) - if !ok { - return fmt.Errorf("unexpected type for email: got %T, want string", email) - } - if err := d.Set("email", email); err != nil { - return fmt.Errorf("Error setting email: %s", err) + // This API may not return the service identity's details, even if the relevant + // Google API is configured for service identities. 
+ if emailVal, ok := opRes["email"]; ok { + email, ok := emailVal.(string) + if !ok { + return fmt.Errorf("unexpected type for email: got %T, want string", email) + } + if err := d.Set("email", email); err != nil { + return fmt.Errorf("Error setting email: %s", err) + } } return nil } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go index 9e096c4f09..af594eda8e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_pubsub_subscription.go @@ -91,6 +91,41 @@ for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message.`, }, + "bigquery_config": { + Type: schema.TypeList, + Optional: true, + Description: `If delivery to BigQuery is used with this subscription, this field is used to configure it. +Either pushConfig or bigQueryConfig can be set, but not both. +If both are empty, then the subscriber will pull and ack messages using API methods.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId}`, + }, + "drop_unknown_fields": { + Type: schema.TypeBool, + Optional: true, + Description: `When true and useTopicSchema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. 
+Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog.`, + }, + "use_topic_schema": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, use the topic's schema as the columns to write to in BigQuery, if it exists.`, + }, + "write_metadata": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. +The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column.`, + }, + }, + }, + ConflictsWith: []string{"push_config"}, + }, "dead_letter_policy": { Type: schema.TypeList, Optional: true, @@ -293,6 +328,7 @@ Note: if not specified, the Push endpoint URL will be used.`, }, }, }, + ConflictsWith: []string{"bigquery_config"}, }, "retain_acked_messages": { Type: schema.TypeBool, @@ -368,6 +404,12 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } + bigqueryConfigProp, err := expandPubsubSubscriptionBigqueryConfig(d.Get("bigquery_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bigquery_config"); !isEmptyValue(reflect.ValueOf(bigqueryConfigProp)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { + obj["bigqueryConfig"] = bigqueryConfigProp + } pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) if err != nil { return err @@ -553,6 +595,9 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("labels", flattenPubsubSubscriptionLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error 
reading Subscription: %s", err) } + if err := d.Set("bigquery_config", flattenPubsubSubscriptionBigqueryConfig(res["bigqueryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Subscription: %s", err) + } if err := d.Set("push_config", flattenPubsubSubscriptionPushConfig(res["pushConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Subscription: %s", err) } @@ -609,6 +654,12 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } + bigqueryConfigProp, err := expandPubsubSubscriptionBigqueryConfig(d.Get("bigquery_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bigquery_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { + obj["bigqueryConfig"] = bigqueryConfigProp + } pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) if err != nil { return err @@ -669,6 +720,10 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) updateMask = append(updateMask, "labels") } + if d.HasChange("bigquery_config") { + updateMask = append(updateMask, "bigqueryConfig") + } + if d.HasChange("push_config") { updateMask = append(updateMask, "pushConfig") } @@ -794,6 +849,41 @@ func flattenPubsubSubscriptionLabels(v interface{}, d *schema.ResourceData, conf return v } +func flattenPubsubSubscriptionBigqueryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["table"] = + flattenPubsubSubscriptionBigqueryConfigTable(original["table"], d, config) + transformed["use_topic_schema"] = + 
flattenPubsubSubscriptionBigqueryConfigUseTopicSchema(original["useTopicSchema"], d, config) + transformed["write_metadata"] = + flattenPubsubSubscriptionBigqueryConfigWriteMetadata(original["writeMetadata"], d, config) + transformed["drop_unknown_fields"] = + flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(original["dropUnknownFields"], d, config) + return []interface{}{transformed} +} +func flattenPubsubSubscriptionBigqueryConfigTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenPubsubSubscriptionPushConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return nil @@ -989,6 +1079,62 @@ func expandPubsubSubscriptionLabels(v interface{}, d TerraformResourceData, conf return m, nil } +func expandPubsubSubscriptionBigqueryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandPubsubSubscriptionBigqueryConfigTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedUseTopicSchema, err := expandPubsubSubscriptionBigqueryConfigUseTopicSchema(original["use_topic_schema"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedUseTopicSchema); val.IsValid() && !isEmptyValue(val) { + transformed["useTopicSchema"] = transformedUseTopicSchema + } + + transformedWriteMetadata, err := expandPubsubSubscriptionBigqueryConfigWriteMetadata(original["write_metadata"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWriteMetadata); val.IsValid() && !isEmptyValue(val) { + transformed["writeMetadata"] = transformedWriteMetadata + } + + transformedDropUnknownFields, err := expandPubsubSubscriptionBigqueryConfigDropUnknownFields(original["drop_unknown_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDropUnknownFields); val.IsValid() && !isEmptyValue(val) { + transformed["dropUnknownFields"] = transformedDropUnknownFields + } + + return transformed, nil +} + +func expandPubsubSubscriptionBigqueryConfigTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandPubsubSubscriptionPushConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_recaptcha_enterprise_key.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_recaptcha_enterprise_key.go index de09af2ad7..350d928e92 100644 --- 
a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_recaptcha_enterprise_key.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_recaptcha_enterprise_key.go @@ -233,12 +233,12 @@ func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") + id, err := obj.ID() if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - createDirective := CreateDirective + directive := CreateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err @@ -255,7 +255,7 @@ func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface } else { client.Config.BasePath = bp } - res, err := client.ApplyKey(context.Background(), obj, createDirective...) + res, err := client.ApplyKey(context.Background(), obj, directive...) if _, ok := err.(dcl.DiffAfterApplyError); ok { log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) @@ -268,10 +268,11 @@ func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } - // Id has a server-generated value, set again after creation - id, err = replaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") + // ID has a server-generated value, set again after creation. 
+ + id, err = res.ID() if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_redis_instance.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_redis_instance.go index 9105d13905..410b2ec0c2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_redis_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_redis_instance.go @@ -136,6 +136,13 @@ will be used.`, Description: `The connection mode of the Redis instance. Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"]`, Default: "DIRECT_PEERING", }, + "customer_managed_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. The KMS key reference that you want to use to encrypt the data at rest for this Redis +instance. 
If this is provided, CMEK is enabled.`, + }, "display_name": { Type: schema.TypeString, Optional: true, @@ -616,6 +623,12 @@ func resourceRedisInstanceCreate(d *schema.ResourceData, meta interface{}) error } else if v, ok := d.GetOkExists("secondary_ip_range"); !isEmptyValue(reflect.ValueOf(secondaryIpRangeProp)) && (ok || !reflect.DeepEqual(v, secondaryIpRangeProp)) { obj["secondaryIpRange"] = secondaryIpRangeProp } + customerManagedKeyProp, err := expandRedisInstanceCustomerManagedKey(d.Get("customer_managed_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("customer_managed_key"); !isEmptyValue(reflect.ValueOf(customerManagedKeyProp)) && (ok || !reflect.DeepEqual(v, customerManagedKeyProp)) { + obj["customerManagedKey"] = customerManagedKeyProp + } obj, err = resourceRedisInstanceEncoder(d, meta, obj) if err != nil { @@ -827,6 +840,9 @@ func resourceRedisInstanceRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("secondary_ip_range", flattenRedisInstanceSecondaryIpRange(res["secondaryIpRange"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("customer_managed_key", flattenRedisInstanceCustomerManagedKey(res["customerManagedKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } return nil } @@ -1490,6 +1506,10 @@ func flattenRedisInstanceSecondaryIpRange(v interface{}, d *schema.ResourceData, return v } +func flattenRedisInstanceCustomerManagedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func expandRedisInstanceAlternativeLocationId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -1778,6 +1798,10 @@ func expandRedisInstanceSecondaryIpRange(v interface{}, d TerraformResourceData, return v, nil } +func expandRedisInstanceCustomerManagedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil 
+} + func resourceRedisInstanceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { config := meta.(*Config) region, err := getRegionFromSchema("region", "location_id", d, config) diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_service_usage_consumer_quota_override.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_service_usage_consumer_quota_override.go index 5fc37014fa..a83569093a 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_service_usage_consumer_quota_override.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_service_usage_consumer_quota_override.go @@ -42,10 +42,13 @@ func resourceServiceUsageConsumerQuotaOverride() *schema.Resource { Schema: map[string]*schema.Schema{ "limit": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The limit on the metric, e.g. '/project/region'.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The limit on the metric, e.g. '/project/region'. + +~> Make sure that 'limit' is in a format that doesn't start with '1/' or contain curly braces. +E.g. 
use '/project/user' instead of '1/{project}/{user}'.`, }, "metric": { Type: schema.TypeString, diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go index 71e2a1a160..08c3415e9e 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_spanner_database.go @@ -19,6 +19,8 @@ import ( "fmt" "log" "reflect" + "regexp" + "strconv" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -56,6 +58,44 @@ func resourceSpannerDBDdlCustomDiff(_ context.Context, diff *schema.ResourceDiff return resourceSpannerDBDdlCustomDiffFunc(diff) } +func validateDatabaseRetentionPeriod(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + valueError := fmt.Errorf("version_retention_period should be in range [1h, 7d], in a format resembling 1d, 24h, 1440m, or 86400s") + + r := regexp.MustCompile("^(\\d{1}d|\\d{1,3}h|\\d{2,5}m|\\d{4,6}s)$") + if !r.MatchString(value) { + errors = append(errors, valueError) + return + } + + unit := value[len(value)-1:] + multiple := value[:len(value)-1] + num, err := strconv.Atoi(multiple) + if err != nil { + errors = append(errors, valueError) + return + } + + if unit == "d" && (num < 1 || num > 7) { + errors = append(errors, valueError) + return + } + if unit == "h" && (num < 1 || num > 7*24) { + errors = append(errors, valueError) + return + } + if unit == "m" && (num < 1*60 || num > 7*24*60) { + errors = append(errors, valueError) + return + } + if unit == "s" && (num < 1*60*60 || num > 7*24*60*60) { + errors = append(errors, valueError) + return + } + + return +} + func resourceSpannerDatabase() *schema.Resource { return &schema.Resource{ Create: resourceSpannerDatabaseCreate, @@ -98,10 +138,7 @@ the instance is 
created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].`, ForceNew: true, ValidateFunc: validateEnum([]string{"GOOGLE_STANDARD_SQL", "POSTGRESQL", ""}), Description: `The dialect of the Cloud Spanner Database. -If it is not provided, "GOOGLE_STANDARD_SQL" will be used. -Note: Databases that are created with POSTGRESQL dialect do not support -extra DDL statements in the 'CreateDatabase' call. You must therefore re-apply -terraform with ddl on the same database after creation. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]`, +If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]`, }, "ddl": { Type: schema.TypeList, @@ -132,6 +169,17 @@ in the same location as the Spanner Database.`, }, }, }, + "version_retention_period": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateDatabaseRetentionPeriod, + Description: `The retention period for the database. The retention period must be between 1 hour +and 7 days, and can be specified in days, hours, minutes, or seconds. For example, +the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. 
+If this property is used, you must avoid adding new DDL statements to 'ddl' that +update the database's version_retention_period.`, + }, "state": { Type: schema.TypeString, Computed: true, @@ -167,6 +215,12 @@ func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } + versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_retention_period"); !isEmptyValue(reflect.ValueOf(versionRetentionPeriodProp)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { + obj["versionRetentionPeriod"] = versionRetentionPeriodProp + } extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) if err != nil { return err @@ -259,6 +313,69 @@ func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) err } d.SetId(id) + // Note: Databases that are created with POSTGRESQL dialect do not support extra DDL + // statements at the time of database creation. To avoid users needing to run + // `terraform apply` twice to get their desired outcome, the provider does not set + // `extraStatements` in the call to the `create` endpoint and all DDL (other than + // ) is run post-create, by calling the `updateDdl` endpoint + + _, ok := opRes["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + + retention, retentionPeriodOk := d.GetOk("version_retention_period") + retentionPeriod := retention.(string) + ddl, ddlOk := d.GetOk("ddl") + ddlStatements := ddl.([]interface{}) + + if retentionPeriodOk || ddlOk { + + obj := make(map[string]interface{}) + updateDdls := []string{} + + if ddlOk { + for i := 0; i < len(ddlStatements); i++ { + updateDdls = append(updateDdls, ddlStatements[i].(string)) + } + } + + if retentionPeriodOk { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, retentionPeriod) + if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, retentionPeriod) + } + updateDdls = append(updateDdls, retentionDdl) + } + + log.Printf("[DEBUG] Applying extra DDL statements to the new Database: %#v", updateDdls) + + obj["statements"] = updateDdls + + url, err = replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") + if err != nil { + return err + } + + res, err = sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error executing DDL statements on Database: %s", err) + } + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = spannerOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Database", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to run DDL against newly-created Database: %s", err) + } + } + log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) return resourceSpannerDatabaseRead(d, meta) @@ -319,6 +436,9 @@ func 
resourceSpannerDatabaseRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("name", flattenSpannerDatabaseName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading Database: %s", err) } + if err := d.Set("version_retention_period", flattenSpannerDatabaseVersionRetentionPeriod(res["versionRetentionPeriod"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } if err := d.Set("state", flattenSpannerDatabaseState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading Database: %s", err) } @@ -352,9 +472,15 @@ func resourceSpannerDatabaseUpdate(d *schema.ResourceData, meta interface{}) err d.Partial(true) - if d.HasChange("ddl") { + if d.HasChange("version_retention_period") || d.HasChange("ddl") { obj := make(map[string]interface{}) + versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_retention_period"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { + obj["versionRetentionPeriod"] = versionRetentionPeriodProp + } extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) if err != nil { return err @@ -478,6 +604,10 @@ func flattenSpannerDatabaseName(v interface{}, d *schema.ResourceData, config *C return NameFromSelfLinkStateFunc(v) } +func flattenSpannerDatabaseVersionRetentionPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenSpannerDatabaseState(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -514,6 +644,10 @@ func expandSpannerDatabaseName(v interface{}, d TerraformResourceData, config *C return v, nil } +func expandSpannerDatabaseVersionRetentionPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func 
expandSpannerDatabaseDdl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -558,8 +692,17 @@ func resourceSpannerDatabaseEncoder(d *schema.ResourceData, meta interface{}, ob if dialect, ok := obj["databaseDialect"]; ok && dialect == "POSTGRESQL" { obj["createStatement"] = fmt.Sprintf("CREATE DATABASE \"%s\"", obj["name"]) } + + // Extra DDL statements are removed from the create request and instead applied to the database in + // a post-create action, to accommodate retrictions when creating PostgreSQL-enabled databases. + // https://cloud.google.com/spanner/docs/create-manage-databases#create_a_database + log.Printf("[DEBUG] Preparing to create new Database. Any extra DDL statements will be applied to the Database in a separate API call") + delete(obj, "name") delete(obj, "instance") + + delete(obj, "versionRetentionPeriod") + delete(obj, "extraStatements") return obj, nil } @@ -574,8 +717,19 @@ func resourceSpannerDatabaseUpdateEncoder(d *schema.ResourceData, meta interface updateDdls = append(updateDdls, newDdls[i].(string)) } + //Add statement to update version_retention_period property, if needed + if d.HasChange("version_retention_period") { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, obj["versionRetentionPeriod"]) + if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, obj["versionRetentionPeriod"]) + } + updateDdls = append(updateDdls, retentionDdl) + } + obj["statements"] = updateDdls delete(obj, "name") + delete(obj, "versionRetentionPeriod") delete(obj, "instance") delete(obj, "extraStatements") return obj, nil diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go index f6f4b59ec0..9e43028793 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_sql_database_instance.go @@ -157,6 +157,30 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + "sql_server_audit_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `The name of the destination bucket (e.g., gs://mybucket).`, + }, + "retention_interval": { + Type: schema.TypeString, + Optional: true, + Description: `How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s"..`, + }, + "upload_interval": { + Type: schema.TypeString, + Optional: true, + Description: `How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + }, + }, + }, + }, "availability_type": { Type: schema.TypeString, Optional: true, @@ -243,6 +267,7 @@ is set to true.`, "collation": { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `The name of server instance collation.`, }, "database_flags": { @@ -350,6 +375,11 @@ is set to true.`, AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, Description: `The preferred compute engine zone.`, }, + "secondary_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The preferred Compute Engine zone for the secondary/failover`, + }, }, }, }, @@ -432,6 +462,48 @@ is set to true.`, }, Description: `Configuration of Query Insights.`, }, + "password_validation_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_length": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Minimum number of characters allowed.`, + }, + "complexity": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"COMPLEXITY_DEFAULT", "COMPLEXITY_UNSPECIFIED"}, false), + Description: `Password complexity.`, + }, + "reuse_interval": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Number of previous passwords that cannot be reused.`, + }, + "disallow_username_substring": { + Type: schema.TypeBool, + Optional: true, + Description: `Disallow username as a part of the password.`, + }, + "password_change_interval": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum interval after which the password can be changed. 
This flag is only supported for PostgresSQL.`, + }, + "enable_password_policy": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the password policy is enabled or not.`, + }, + }, + }, + }, }, }, Description: `The settings to use for the database. The configuration is detailed below.`, @@ -462,7 +534,7 @@ is set to true.`, Optional: true, ForceNew: true, Sensitive: true, - Description: `Initial root password. Required for MS SQL Server, ignored by MySQL and PostgreSQL.`, + Description: `Initial root password. Required for MS SQL Server.`, }, "ip_address": { @@ -800,10 +872,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) instance.Settings = desiredSettings } - // MSSQL Server require rootPassword to be set - if strings.Contains(instance.DatabaseVersion, "SQLSERVER") { - instance.RootPassword = d.Get("root_password").(string) - } + instance.RootPassword = d.Get("root_password").(string) // Modifying a replica during Create can cause problems if the master is // modified at the same time. 
Lock the master until we're done in order @@ -812,6 +881,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) mutexKV.Lock(instanceMutexKey(project, instance.MasterInstanceName)) defer mutexKV.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) } + if k, ok := d.GetOk("encryption_key_name"); ok { instance.DiskEncryptionConfiguration = &sqladmin.DiskEncryptionConfiguration{ KmsKeyName: k.(string), @@ -952,23 +1022,25 @@ func expandSqlDatabaseInstanceSettings(configured []interface{}) *sqladmin.Setti _settings := configured[0].(map[string]interface{}) settings := &sqladmin.Settings{ // Version is unset in Create but is set during update - SettingsVersion: int64(_settings["version"].(int)), - Tier: _settings["tier"].(string), - ForceSendFields: []string{"StorageAutoResize"}, - ActivationPolicy: _settings["activation_policy"].(string), - ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), - AvailabilityType: _settings["availability_type"].(string), - Collation: _settings["collation"].(string), - DataDiskSizeGb: int64(_settings["disk_size"].(int)), - DataDiskType: _settings["disk_type"].(string), - PricingPlan: _settings["pricing_plan"].(string), - UserLabels: convertStringMap(_settings["user_labels"].(map[string]interface{})), - BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), - DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].([]interface{})), - IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), - LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), - MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), - InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), + SettingsVersion: int64(_settings["version"].(int)), + Tier: _settings["tier"].(string), + 
ForceSendFields: []string{"StorageAutoResize"}, + ActivationPolicy: _settings["activation_policy"].(string), + ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), + SqlServerAuditConfig: expandSqlServerAuditConfig(_settings["sql_server_audit_config"].([]interface{})), + AvailabilityType: _settings["availability_type"].(string), + Collation: _settings["collation"].(string), + DataDiskSizeGb: int64(_settings["disk_size"].(int)), + DataDiskType: _settings["disk_type"].(string), + PricingPlan: _settings["pricing_plan"].(string), + UserLabels: convertStringMap(_settings["user_labels"].(map[string]interface{})), + BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), + DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].([]interface{})), + IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), + LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), + MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), + InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), + PasswordValidationPolicy: expandPasswordValidationPolicy(_settings["password_validation_policy"].([]interface{})), } resize := _settings["disk_autoresize"].(bool) @@ -1040,6 +1112,7 @@ func expandLocationPreference(configured []interface{}) *sqladmin.LocationPrefer return &sqladmin.LocationPreference{ FollowGaeApplication: _locationPreference["follow_gae_application"].(string), Zone: _locationPreference["zone"].(string), + SecondaryZone: _locationPreference["secondary_zone"].(string), } } @@ -1131,6 +1204,20 @@ func expandActiveDirectoryConfig(configured interface{}) *sqladmin.SqlActiveDire } } +func expandSqlServerAuditConfig(configured interface{}) *sqladmin.SqlServerAuditConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + + config := 
l[0].(map[string]interface{}) + return &sqladmin.SqlServerAuditConfig{ + Bucket: config["bucket"].(string), + RetentionInterval: config["retention_interval"].(string), + UploadInterval: config["upload_interval"].(string), + } +} + func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig { if len(configured) == 0 || configured[0] == nil { return nil @@ -1145,6 +1232,22 @@ func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig { } } +func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.PasswordValidationPolicy { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _passwordValidationPolicy := configured[0].(map[string]interface{}) + return &sqladmin.PasswordValidationPolicy{ + MinLength: int64(_passwordValidationPolicy["min_length"].(int)), + Complexity: _passwordValidationPolicy["complexity"].(string), + ReuseInterval: int64(_passwordValidationPolicy["reuse_interval"].(int)), + DisallowUsernameSubstring: _passwordValidationPolicy["disallow_username_substring"].(bool), + PasswordChangeInterval: _passwordValidationPolicy["password_change_interval"].(string), + EnablePasswordPolicy: _passwordValidationPolicy["enable_password_policy"].(bool), + } +} + func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) @@ -1185,6 +1288,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("settings", flattenSettings(instance.Settings)); err != nil { log.Printf("[WARN] Failed to set SQL Database Instance Settings") } + if instance.DiskEncryptionConfiguration != nil { if err := d.Set("encryption_key_name", instance.DiskEncryptionConfiguration.KmsKeyName); err != nil { return fmt.Errorf("Error setting encryption_key_name: %s", err) @@ -1259,6 +1363,9 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) Settings: 
expandSqlDatabaseInstanceSettings(d.Get("settings").([]interface{})), } + // Collation cannot be included in the update request + instance.Settings.Collation = "" + // Lock on the master_instance_name just in case updating any replica // settings causes operations on the master. if v, ok := d.GetOk("master_instance_name"); ok { @@ -1361,21 +1468,26 @@ func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) func flattenSettings(settings *sqladmin.Settings) []map[string]interface{} { data := map[string]interface{}{ - "version": settings.SettingsVersion, - "tier": settings.Tier, - "activation_policy": settings.ActivationPolicy, - "availability_type": settings.AvailabilityType, - "collation": settings.Collation, - "disk_type": settings.DataDiskType, - "disk_size": settings.DataDiskSizeGb, - "pricing_plan": settings.PricingPlan, - "user_labels": settings.UserLabels, + "version": settings.SettingsVersion, + "tier": settings.Tier, + "activation_policy": settings.ActivationPolicy, + "availability_type": settings.AvailabilityType, + "collation": settings.Collation, + "disk_type": settings.DataDiskType, + "disk_size": settings.DataDiskSizeGb, + "pricing_plan": settings.PricingPlan, + "user_labels": settings.UserLabels, + "password_validation_policy": settings.PasswordValidationPolicy, } if settings.ActiveDirectoryConfig != nil { data["active_directory_config"] = flattenActiveDirectoryConfig(settings.ActiveDirectoryConfig) } + if settings.SqlServerAuditConfig != nil { + data["sql_server_audit_config"] = flattenSqlServerAuditConfig(settings.SqlServerAuditConfig) + } + if settings.BackupConfiguration != nil { data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration) } @@ -1407,6 +1519,10 @@ func flattenSettings(settings *sqladmin.Settings) []map[string]interface{} { data["user_labels"] = settings.UserLabels } + if settings.PasswordValidationPolicy != nil { + data["password_validation_policy"] = 
flattenPasswordValidationPolicy(settings.PasswordValidationPolicy) + } + return []map[string]interface{}{data} } @@ -1447,6 +1563,19 @@ func flattenActiveDirectoryConfig(sqlActiveDirectoryConfig *sqladmin.SqlActiveDi } } +func flattenSqlServerAuditConfig(sqlServerAuditConfig *sqladmin.SqlServerAuditConfig) []map[string]interface{} { + if sqlServerAuditConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "bucket": sqlServerAuditConfig.Bucket, + "retention_interval": sqlServerAuditConfig.RetentionInterval, + "upload_interval": sqlServerAuditConfig.UploadInterval, + }, + } +} + func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]interface{} { flags := make([]map[string]interface{}, 0, len(databaseFlags)) @@ -1497,6 +1626,7 @@ func flattenLocationPreference(locationPreference *sqladmin.LocationPreference) data := map[string]interface{}{ "follow_gae_application": locationPreference.FollowGaeApplication, "zone": locationPreference.Zone, + "secondary_zone": locationPreference.SecondaryZone, } return []map[string]interface{}{data} @@ -1587,6 +1717,18 @@ func flattenInsightsConfig(insightsConfig *sqladmin.InsightsConfig) interface{} return []map[string]interface{}{data} } +func flattenPasswordValidationPolicy(passwordValidationPolicy *sqladmin.PasswordValidationPolicy) interface{} { + data := map[string]interface{}{ + "min_length": passwordValidationPolicy.MinLength, + "complexity": passwordValidationPolicy.Complexity, + "reuse_interval": passwordValidationPolicy.ReuseInterval, + "disallow_username_substring": passwordValidationPolicy.DisallowUsernameSubstring, + "password_change_interval": passwordValidationPolicy.PasswordChangeInterval, + "enable_password_policy": passwordValidationPolicy.EnablePasswordPolicy, + } + return []map[string]interface{}{data} +} + func instanceMutexKey(project, instance_name string) string { return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) } diff --git 
a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go index 471ec0f4f4..72a0428c4d 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/resource_storage_bucket.go @@ -207,6 +207,18 @@ func resourceStorageBucket() *schema.Resource { Optional: true, Description: `Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.`, }, + "matches_prefix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name prefixes to satisfy this condition.`, + }, + "matches_suffix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name suffixes to satisfy this condition.`, + }, }, }, Description: `The Lifecycle Rule's condition configuration.`, @@ -389,6 +401,9 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Get the bucket and location bucket := d.Get("name").(string) + if err := checkGCSName(bucket); err != nil { + return err + } location := d.Get("location").(string) // Create a bucket, setting the labels, location and name. 
@@ -986,6 +1001,8 @@ func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleC "days_since_custom_time": int(condition.DaysSinceCustomTime), "days_since_noncurrent_time": int(condition.DaysSinceNoncurrentTime), "noncurrent_time_before": condition.NoncurrentTimeBefore, + "matches_prefix": convertStringArrToInterface(condition.MatchesPrefix), + "matches_suffix": convertStringArrToInterface(condition.MatchesSuffix), } if condition.IsLive == nil { ruleCondition["with_state"] = "ANY" @@ -1199,6 +1216,25 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi transformed.NoncurrentTimeBefore = v.(string) } + if v, ok := condition["matches_prefix"]; ok { + prefixes := v.([]interface{}) + transformedPrefixes := make([]string, 0, len(prefixes)) + + for _, v := range prefixes { + transformedPrefixes = append(transformedPrefixes, v.(string)) + } + transformed.MatchesPrefix = transformedPrefixes + } + if v, ok := condition["matches_suffix"]; ok { + suffixes := v.([]interface{}) + transformedSuffixes := make([]string, 0, len(suffixes)) + + for _, v := range suffixes { + transformedSuffixes = append(transformedSuffixes, v.(string)) + } + transformed.MatchesSuffix = transformedSuffixes + } + return transformed, nil } @@ -1264,6 +1300,19 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } + if v, ok := m["matches_prefix"]; ok { + matches_prefixes := v.([]interface{}) + for _, matches_prefix := range matches_prefixes { + buf.WriteString(fmt.Sprintf("%s-", matches_prefix)) + } + } + if v, ok := m["matches_suffix"]; ok { + matches_suffixes := v.([]interface{}) + for _, matches_suffix := range matches_suffixes { + buf.WriteString(fmt.Sprintf("%s-", matches_suffix)) + } + } + return hashcode(buf.String()) } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go 
b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go index 6cbb9e7fe4..ae12c59ea7 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/service_usage_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go index 3e746dcf55..9934ff31aa 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/spanner_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go index b9e09955d3..079eae09be 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tags_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. 
// // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go index 490b4eef64..7ffcb03ee9 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/tpu_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go index 942aebac29..f009a70667 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/transport.go @@ -1,4 +1,3 @@ -// package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go index 0ce18e799b..7717851281 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/utils.go @@ -6,6 +6,7 @@ import ( "fmt" "log" "os" + "regexp" "sort" "strconv" "strings" @@ -525,3 +526,28 @@ func fake404(reasonResourceType, resourceName string) *googleapi.Error { Message: fmt.Sprintf("%v object %v not found", reasonResourceType, resourceName), } } + +// validate name of the gcs bucket. 
Guidelines are located at https://cloud.google.com/storage/docs/naming-buckets +// this does not attempt to check for IP addresses or close misspellings of "google" +func checkGCSName(name string) error { + if strings.HasPrefix(name, "goog") { + return fmt.Errorf("error: bucket name %s cannot start with %q", name, "goog") + } + + if strings.Contains(name, "google") { + return fmt.Errorf("error: bucket name %s cannot contain %q", name, "google") + } + + valid, _ := regexp.MatchString("^[a-z0-9][a-z0-9_.-]{1,220}[a-z0-9]$", name) + if !valid { + return fmt.Errorf("error: bucket name validation failed %v. See https://cloud.google.com/storage/docs/naming-buckets", name) + } + + for _, str := range strings.Split(name, ".") { + valid, _ := regexp.MatchString("^[a-z0-9_-]{1,63}$", str) + if !valid { + return fmt.Errorf("error: bucket name validation failed %v", str) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go index ee198c181c..4b70a2e7d6 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/validation.go @@ -82,7 +82,9 @@ var rfc1918Networks = []string{ "192.168.0.0/16", } -func validateGCPName(v interface{}, k string) (ws []string, errors []error) { +// validateGCEName ensures that a field matches the requirements for Compute Engine resource names +// https://cloud.google.com/compute/docs/naming-resources#resource-name-format +func validateGCEName(v interface{}, k string) (ws []string, errors []error) { re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` return validateRegexp(re)(v, k) } diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go index 
df0d1109cc..ffd34ed1f2 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/vpc_access_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go index 0c855d09f6..f46412abc7 100644 --- a/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go +++ b/vendor/github.com/hashicorp/terraform-provider-google-beta/google-beta/workflows_operation.go @@ -11,6 +11,7 @@ // .github/CONTRIBUTING.md. // // ---------------------------------------------------------------------------- + package google import ( diff --git a/vendor/github.com/hashicorp/terraform-registry-address/.go-version b/vendor/github.com/hashicorp/terraform-registry-address/.go-version index 42cf0675c5..adc97d8e22 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/.go-version +++ b/vendor/github.com/hashicorp/terraform-registry-address/.go-version @@ -1 +1 @@ -1.15.2 +1.18 diff --git a/vendor/github.com/hashicorp/terraform-registry-address/README.md b/vendor/github.com/hashicorp/terraform-registry-address/README.md index 2ed8398da7..27db81f7c1 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/README.md +++ b/vendor/github.com/hashicorp/terraform-registry-address/README.md @@ -1,46 +1,149 @@ # terraform-registry-address -This package helps with representation, comparison and parsing of -Terraform Registry addresses, such as -`registry.terraform.io/grafana/grafana` or `hashicorp/aws`. 
+This module enables parsing, comparison and canonical representation of +[Terraform Registry](https://registry.terraform.io/) **provider** addresses +(such as `registry.terraform.io/grafana/grafana` or `hashicorp/aws`) +and **module** addresses (such as `hashicorp/subnets/cidr`). -The most common source of these addresses outside of Terraform Core -is JSON representation of state, plan, or schemas as obtained -via [`hashicorp/terraform-exec`](https://github.com/hashicorp/terraform-exec). +**Provider** addresses can be found in -## Example + - [`terraform show -json `](https://www.terraform.io/internals/json-format#configuration-representation) (`full_name`) + - [`terraform version -json`](https://www.terraform.io/cli/commands/version#example) (`provider_selections`) + - [`terraform providers schema -json`](https://www.terraform.io/cli/commands/providers/schema#providers-schema-representation) (keys of `provider_schemas`) + - within `required_providers` block in Terraform configuration (`*.tf`) + - Terraform [CLI configuration file](https://www.terraform.io/cli/config/config-file#provider-installation) + - Plugin [reattach configurations](https://www.terraform.io/plugin/debugging#running-terraform-with-a-provider-in-debug-mode) + +**Module** addresses can be found within `source` argument +of `module` block in Terraform configuration (`*.tf`) +and parts of the address (namespace and name) in the Registry API. + +## Compatibility + +The module assumes compatibility with Terraform v0.12 and later, +which have the mentioned JSON output produced by corresponding CLI flags. + +We recommend carefully reading the [ambigouous provider addresses](#Ambiguous-Provider-Addresses) +section below which may impact versions `0.12` and `0.13`. 
+ +## Related Libraries + +Other libraries which may help with consuming most of the above Terraform +outputs in automation: + + - [`hashicorp/terraform-exec`](https://github.com/hashicorp/terraform-exec) + - [`hashicorp/terraform-json`](https://github.com/hashicorp/terraform-json) + +## Usage + +### Provider ```go -p, err := ParseRawProviderSourceString("hashicorp/aws") +pAddr, err := ParseProviderSource("hashicorp/aws") if err != nil { // deal with error } -// p == Provider{ +// pAddr == Provider{ // Type: "aws", // Namespace: "hashicorp", -// Hostname: svchost.Hostname("registry.terraform.io"), +// Hostname: DefaultProviderRegistryHost, // } ``` -## Legacy address +### Module + +```go +mAddr, err := ParseModuleSource("hashicorp/consul/aws//modules/consul-cluster") +if err != nil { + // deal with error +} + +// mAddr == Module{ +// Package: ModulePackage{ +// Host: DefaultProviderRegistryHost, +// Namespace: "hashicorp", +// Name: "consul", +// TargetSystem: "aws", +// }, +// Subdir: "modules/consul-cluster", +// }, +``` + +## Other Module Address Formats + +Modules can also be sourced from [other sources](https://www.terraform.io/language/modules/sources) +and these other sources (outside of Terraform Registry) +have different address formats, such as `./local` or +`github.com/hashicorp/example`. + +This library does _not_ recognize such other address formats +and it will return error upon parsing these. + +## Ambiguous Provider Addresses + +Qualified addresses with namespace (such as `hashicorp/aws`) +are used exclusively in all recent versions (`0.14+`) of Terraform. +If you only work with Terraform `v0.14.0+` configuration/output, you may +safely ignore the rest of this section and related part of the API. + +There are a few types of ambiguous addresses you may comes accross: + + - Terraform `v0.12` uses "namespace-less address", such as `aws`. 
+ - Terraform `v0.13` may use `-` as a placeholder for the unknown namespace, + resulting in address such as `-/aws`. + - Terraform `v0.14+` _configuration_ still allows ambiguous providers + through `provider "" {}` block _without_ corresponding + entry inside `required_providers`, but these providers are always + resolved as `hashicorp/` and all JSON outputs only use that + resolved address. + +Both ambiguous address formats are accepted by `ParseProviderSource()` -A legacy address is by itself (without more context) ambiguous. -For example `aws` may represent either the official `hashicorp/aws` -or just any custom-built provider called `aws`. +```go +pAddr, err := ParseProviderSource("aws") +if err != nil { + // deal with error +} + +// pAddr == Provider{ +// Type: "aws", +// Namespace: UnknownProviderNamespace, // "?" +// Hostname: DefaultProviderRegistryHost, // "registry.terraform.io" +// } +pAddr.HasKnownNamespace() // == false +pAddr.IsLegacy() // == false +``` +```go +pAddr, err := ParseProviderSource("-/aws") +if err != nil { + // deal with error +} -Such ambiguous address can be produced by Terraform `<=0.12`. You can -just use `ImpliedProviderForUnqualifiedType` if you know for sure -the address was produced by an affected version. +// pAddr == Provider{ +// Type: "aws", +// Namespace: LegacyProviderNamespace, // "-" +// Hostname: DefaultProviderRegistryHost, // "registry.terraform.io" +// } +pAddr.HasKnownNamespace() // == true +pAddr.IsLegacy() // == true +``` -If you do not have that context you should parse the string via -`ParseRawProviderSourceString` and then check `addr.IsLegacy()`. +However `NewProvider()` will panic if you pass an empty namespace +or any placeholder indicating unknown namespace. + +```go +NewProvider(DefaultProviderRegistryHost, "", "aws") // panic +NewProvider(DefaultProviderRegistryHost, "-", "aws") // panic +NewProvider(DefaultProviderRegistryHost, "?", "aws") // panic +``` -### What to do with a legacy address? 
+If you come across an ambiguous address, you should resolve +it to a fully qualified one and use that one instead. -Ask the Registry API whether and where the provider was moved to +### Resolving Ambiguous Address -(`-` represents the legacy, basically unknown namespace) +The Registry API provides the safest way of resolving an ambiguous address. ```sh # grafana (redirected to its own namespace) @@ -54,28 +157,15 @@ $ curl -s https://registry.terraform.io/v1/providers/-/aws/versions | jq '(.id, null ``` -Then: +When you cache results, ensure you have invalidation +mechanism in place as target (migrated) namespace may change. - - Reparse the _new_ address (`moved_to`) of any _moved_ provider (e.g. `grafana/grafana`) via `ParseRawProviderSourceString` - - Reparse the full address (`id`) of any other provider (e.g. `hashicorp/aws`) - -Depending on context (legacy) `terraform` may need to be parsed separately. -Read more about this provider below. - -If for some reason you cannot ask the Registry API you may also use -`ParseAndInferProviderSourceString` which assumes that any legacy address -(including `terraform`) belongs to the `hashicorp` namespace. - -If you cache results (which you should), ensure you have invalidation -mechanism in place because target (migrated) namespace may change. -Hard-coding migrations anywhere in code is strongly discouraged. - -### `terraform` provider +#### `terraform` provider Like any other legacy address `terraform` is also ambiguous. Such address may (most unlikely) represent a custom-built provider called `terraform`, or the now archived [`hashicorp/terraform` provider in the registry](https://registry.terraform.io/providers/hashicorp/terraform/latest), -or (most likely) the `terraform` provider built into 0.12+, which is +or (most likely) the `terraform` provider built into 0.11+, which is represented via a dedicated FQN of `terraform.io/builtin/terraform` in 0.13+. 
You may be able to differentiate between these different providers if you @@ -85,4 +175,7 @@ Alternatively you may just treat the address as the builtin provider, i.e. assume all of its logic including schema is contained within Terraform Core. -In such case you should just use `NewBuiltInProvider("terraform")`. +In such case you should construct the address in the following way +```go +pAddr := NewProvider(BuiltInProviderHost, BuiltInProviderNamespace, "terraform") +``` diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module.go b/vendor/github.com/hashicorp/terraform-registry-address/module.go new file mode 100644 index 0000000000..6af0c5976b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/module.go @@ -0,0 +1,251 @@ +package tfaddr + +import ( + "fmt" + "path" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +// Module is representing a module listed in a Terraform module +// registry. +type Module struct { + // Package is the registry package that the target module belongs to. + // The module installer must translate this into a ModuleSourceRemote + // using the registry API and then take that underlying address's + // Package in order to find the actual package location. + Package ModulePackage + + // If Subdir is non-empty then it represents a sub-directory within the + // remote package that the registry address eventually resolves to. + // This will ultimately become the suffix of the Subdir of the + // ModuleSourceRemote that the registry address translates to. + // + // Subdir uses a normalized forward-slash-based path syntax within the + // virtual filesystem represented by the final package. It will never + // include `../` or `./` sequences. + Subdir string +} + +// DefaultModuleRegistryHost is the hostname used for registry-based module +// source addresses that do not have an explicit hostname. 
+const DefaultModuleRegistryHost = svchost.Hostname("registry.terraform.io") + +var moduleRegistryNamePattern = regexp.MustCompile("^[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?$") +var moduleRegistryTargetSystemPattern = regexp.MustCompile("^[0-9a-z]{1,64}$") + +// ParseModuleSource only accepts module registry addresses, and +// will reject any other address type. +func ParseModuleSource(raw string) (Module, error) { + var err error + + var subDir string + raw, subDir = splitPackageSubdir(raw) + if strings.HasPrefix(subDir, "../") { + return Module{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir) + } + + parts := strings.Split(raw, "/") + // A valid registry address has either three or four parts, because the + // leading hostname part is optional. + if len(parts) != 3 && len(parts) != 4 { + return Module{}, fmt.Errorf("a module registry source address must have either three or four slash-separated components") + } + + host := DefaultModuleRegistryHost + if len(parts) == 4 { + host, err = svchost.ForComparison(parts[0]) + if err != nil { + // The svchost library doesn't produce very good error messages to + // return to an end-user, so we'll use some custom ones here. + switch { + case strings.Contains(parts[0], "--"): + // Looks like possibly punycode, which we don't allow here + // to ensure that source addresses are written readably. 
+ return Module{}, fmt.Errorf("invalid module registry hostname %q; internationalized domain names must be given as direct unicode characters, not in punycode", parts[0]) + default: + return Module{}, fmt.Errorf("invalid module registry hostname %q", parts[0]) + } + } + if !strings.Contains(host.String(), ".") { + return Module{}, fmt.Errorf("invalid module registry hostname: must contain at least one dot") + } + // Discard the hostname prefix now that we've processed it + parts = parts[1:] + } + + ret := Module{ + Package: ModulePackage{ + Host: host, + }, + + Subdir: subDir, + } + + if host == svchost.Hostname("github.com") || host == svchost.Hostname("bitbucket.org") { + return ret, fmt.Errorf("can't use %q as a module registry host, because it's reserved for installing directly from version control repositories", host) + } + + if ret.Package.Namespace, err = parseModuleRegistryName(parts[0]); err != nil { + if strings.Contains(parts[0], ".") { + // Seems like the user omitted one of the latter components in + // an address with an explicit hostname. + return ret, fmt.Errorf("source address must have three more components after the hostname: the namespace, the name, and the target system") + } + return ret, fmt.Errorf("invalid namespace %q: %s", parts[0], err) + } + if ret.Package.Name, err = parseModuleRegistryName(parts[1]); err != nil { + return ret, fmt.Errorf("invalid module name %q: %s", parts[1], err) + } + if ret.Package.TargetSystem, err = parseModuleRegistryTargetSystem(parts[2]); err != nil { + if strings.Contains(parts[2], "?") { + // The user was trying to include a query string, probably? + return ret, fmt.Errorf("module registry addresses may not include a query string portion") + } + return ret, fmt.Errorf("invalid target system %q: %s", parts[2], err) + } + + return ret, nil +} + +// MustParseModuleSource is a wrapper around ParseModuleSource that panics if +// it returns an error. 
+func MustParseModuleSource(raw string) (Module) { + mod, err := ParseModuleSource(raw) + if err != nil { + panic(err) + } + return mod +} + +// parseModuleRegistryName validates and normalizes a string in either the +// "namespace" or "name" position of a module registry source address. +func parseModuleRegistryName(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. Unfortunately we didn't end up defining + // these exactly equivalently: provider names can only use dashes as + // punctuation, whereas module names can use underscores. So here we're + // using some regular expressions from the original module source + // implementation, rather than using the IDNA rules as we do in + // ParseProviderPart. + + if !moduleRegistryNamePattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +// parseModuleRegistryTargetSystem validates and normalizes a string in the +// "target system" position of a module registry source address. This is +// what we historically called "provider" but never actually enforced as +// being a provider address, and now _cannot_ be a provider address because +// provider addresses have three slash-separated components of their own. +func parseModuleRegistryTargetSystem(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. 
Unfortunately we didn't end up defining + // these exactly equivalently: provider names can't use dashes or + // underscores. So here we're using some regular expressions from the + // original module source implementation, rather than using the IDNA rules + // as we do in ParseProviderPart. + + if !moduleRegistryTargetSystemPattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 ASCII letters or digits") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +// String returns a full representation of the address, including any +// additional components that are typically implied by omission in +// user-written addresses. +// +// We typically use this longer representation in error message, in case +// the inclusion of normally-omitted components is helpful in debugging +// unexpected behavior. +func (s Module) String() string { + if s.Subdir != "" { + return s.Package.String() + "//" + s.Subdir + } + return s.Package.String() +} + +// ForDisplay is similar to String but instead returns a representation of +// the idiomatic way to write the address in configuration, omitting +// components that are commonly just implied in addresses written by +// users. +// +// We typically use this shorter representation in informational messages, +// such as the note that we're about to start downloading a package. +func (s Module) ForDisplay() string { + if s.Subdir != "" { + return s.Package.ForDisplay() + "//" + s.Subdir + } + return s.Package.ForDisplay() +} + +// splitPackageSubdir detects whether the given address string has a +// subdirectory portion, and if so returns a non-empty subDir string +// along with the trimmed package address. +// +// If the given string doesn't have a subdirectory portion then it'll +// just be returned verbatim in packageAddr, with an empty subDir value. 
+func splitPackageSubdir(given string) (packageAddr, subDir string) { + packageAddr, subDir = sourceDirSubdir(given) + if subDir != "" { + subDir = path.Clean(subDir) + } + return packageAddr, subDir +} + +// sourceDirSubdir takes a source URL and returns a tuple of the URL without +// the subdir and the subdir. +// +// ex: +// dom.com/path/?q=p => dom.com/path/?q=p, "" +// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" +// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" +func sourceDirSubdir(src string) (string, string) { + // URL might contains another url in query parameters + stop := len(src) + if idx := strings.Index(src, "?"); idx > -1 { + stop = idx + } + + // Calculate an offset to avoid accidentally marking the scheme + // as the dir. + var offset int + if idx := strings.Index(src[:stop], "://"); idx > -1 { + offset = idx + 3 + } + + // First see if we even have an explicit subdir + idx := strings.Index(src[offset:stop], "//") + if idx == -1 { + return src, "" + } + + idx += offset + subdir := src[idx+2:] + src = src[:idx] + + // Next, check if we have query parameters and push them onto the + // URL. + if idx = strings.Index(subdir, "?"); idx > -1 { + query := subdir[idx:] + subdir = subdir[:idx] + src += query + } + + return src, subdir +} diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module_package.go b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go new file mode 100644 index 0000000000..d8ad2534a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go @@ -0,0 +1,58 @@ +package tfaddr + +import ( + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +// A ModulePackage is an extra indirection over a ModulePackage where +// we use a module registry to translate a more symbolic address (and +// associated version constraint given out of band) into a physical source +// location. 
+// +// ModulePackage is distinct from ModulePackage because they have +// disjoint use-cases: registry package addresses are only used to query a +// registry in order to find a real module package address. These being +// distinct is intended to help future maintainers more easily follow the +// series of steps in the module installer, with the help of the type checker. +type ModulePackage struct { + Host svchost.Hostname + Namespace string + Name string + TargetSystem string +} + +func (s ModulePackage) String() string { + // Note: we're using the "display" form of the hostname here because + // for our service hostnames "for display" means something different: + // it means to render non-ASCII characters directly as Unicode + // characters, rather than using the "punycode" representation we + // use for internal processing, and so the "display" representation + // is actually what users would write in their configurations. + return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol() +} + +func (s ModulePackage) ForDisplay() string { + if s.Host == DefaultModuleRegistryHost { + return s.ForRegistryProtocol() + } + return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol() +} + +// ForRegistryProtocol returns a string representation of just the namespace, +// name, and target system portions of the address, always omitting the +// registry hostname and the subdirectory portion, if any. +// +// This is primarily intended for generating addresses to send to the +// registry in question via the registry protocol, since the protocol +// skips sending the registry its own hostname as part of identifiers. 
+func (s ModulePackage) ForRegistryProtocol() string { + var buf strings.Builder + buf.WriteString(s.Namespace) + buf.WriteByte('/') + buf.WriteString(s.Name) + buf.WriteByte('/') + buf.WriteString(s.TargetSystem) + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform-registry-address/provider.go b/vendor/github.com/hashicorp/terraform-registry-address/provider.go index 4dd0a5b781..23e1e221f2 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/provider.go +++ b/vendor/github.com/hashicorp/terraform-registry-address/provider.go @@ -16,9 +16,9 @@ type Provider struct { Hostname svchost.Hostname } -// DefaultRegistryHost is the hostname used for provider addresses that do +// DefaultProviderRegistryHost is the hostname used for provider addresses that do // not have an explicit hostname. -const DefaultRegistryHost = svchost.Hostname("registry.terraform.io") +const DefaultProviderRegistryHost = svchost.Hostname("registry.terraform.io") // BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider // namespace. Built-in provider addresses must also have their namespace set @@ -34,11 +34,18 @@ const BuiltInProviderHost = svchost.Hostname("terraform.io") // special, even if they haven't encountered the concept formally yet. const BuiltInProviderNamespace = "builtin" +// UnknownProviderNamespace is the special string used to indicate +// unknown namespace, e.g. in "aws". This is equivalent to +// LegacyProviderNamespace for <0.12 style address. This namespace +// would never be produced by Terraform itself explicitly, it is +// only an internal placeholder. +const UnknownProviderNamespace = "?" + // LegacyProviderNamespace is the special string used in the Namespace field // of type Provider to mark a legacy provider address. This special namespace // value would normally be invalid, and can be used only when the hostname is -// DefaultRegistryHost because that host owns the mapping from legacy name to -// FQN. 
+// DefaultProviderRegistryHost because that host owns the mapping from legacy name to +// FQN. This may be produced by Terraform 0.13. const LegacyProviderNamespace = "-" // String returns an FQN string, indended for use in machine-readable output. @@ -56,7 +63,7 @@ func (pt Provider) ForDisplay() string { panic("called ForDisplay on zero-value addrs.Provider") } - if pt.Hostname == DefaultRegistryHost { + if pt.Hostname == DefaultProviderRegistryHost { return pt.Namespace + "/" + pt.Type } return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type @@ -75,10 +82,18 @@ func (pt Provider) ForDisplay() string { // ParseProviderPart first to check that the given value is valid. func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider { if namespace == LegacyProviderNamespace { - // Legacy provider addresses must always be created via - // NewLegacyProvider so that we can use static analysis to find - // codepaths still working with those. - panic("attempt to create legacy provider address using NewProvider; use NewLegacyProvider instead") + // Legacy provider addresses must always be created via struct + panic("attempt to create legacy provider address using NewProvider; use Provider{} instead") + } + if namespace == UnknownProviderNamespace { + // Provider addresses with unknown namespace must always + // be created via struct + panic("attempt to create provider address with unknown namespace using NewProvider; use Provider{} instead") + } + if namespace == "" { + // This case is already handled by MustParseProviderPart() below, + // but we catch it early to provide more helpful message. + panic("attempt to create provider address with empty namespace") } return Provider{ @@ -88,63 +103,6 @@ func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider } } -// ImpliedProviderForUnqualifiedType represents the rules for inferring what -// provider FQN a user intended when only a naked type name is available. 
-// -// For all except the type name "terraform" this returns a so-called "default" -// provider, which is under the registry.terraform.io/hashicorp/ namespace. -// -// As a special case, the string "terraform" maps to -// "terraform.io/builtin/terraform" because that is the more likely user -// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform" -// which remains only for compatibility with older Terraform versions. -func ImpliedProviderForUnqualifiedType(typeName string) Provider { - switch typeName { - case "terraform": - // Note for future maintainers: any additional strings we add here - // as implied to be builtin must never also be use as provider names - // in the registry.terraform.io/hashicorp/... namespace, because - // otherwise older versions of Terraform could implicitly select - // the registry name instead of the internal one. - return NewBuiltInProvider(typeName) - default: - return NewDefaultProvider(typeName) - } -} - -// NewDefaultProvider returns the default address of a HashiCorp-maintained, -// Registry-hosted provider. -func NewDefaultProvider(name string) Provider { - return Provider{ - Type: MustParseProviderPart(name), - Namespace: "hashicorp", - Hostname: DefaultRegistryHost, - } -} - -// NewBuiltInProvider returns the address of a "built-in" provider. See -// the docs for Provider.IsBuiltIn for more information. -func NewBuiltInProvider(name string) Provider { - return Provider{ - Type: MustParseProviderPart(name), - Namespace: BuiltInProviderNamespace, - Hostname: BuiltInProviderHost, - } -} - -// NewLegacyProvider returns a mock address for a provider. -// This will be removed when ProviderType is fully integrated. -func NewLegacyProvider(name string) Provider { - return Provider{ - // We intentionally don't normalize and validate the legacy names, - // because existing code expects legacy provider names to pass through - // verbatim, even if not compliant with our new naming rules. 
- Type: name, - Namespace: LegacyProviderNamespace, - Hostname: DefaultRegistryHost, - } -} - // LegacyString returns the provider type, which is frequently used // interchangeably with provider name. This function can and should be removed // when provider type is fully integrated. As a safeguard for future @@ -167,6 +125,12 @@ func (pt Provider) IsZero() bool { return pt == Provider{} } +// HasKnownNamespace returns true if the provider namespace is known +// (also if it is legacy namespace) +func (pt Provider) HasKnownNamespace() bool { + return pt.Namespace != UnknownProviderNamespace +} + // IsBuiltIn returns true if the receiver is the address of a "built-in" // provider. That is, a provider under terraform.io/builtin/ which is // included as part of the Terraform binary itself rather than one to be @@ -201,25 +165,16 @@ func (pt Provider) IsLegacy() bool { panic("called IsLegacy() on zero-value addrs.Provider") } - return pt.Hostname == DefaultRegistryHost && pt.Namespace == LegacyProviderNamespace + return pt.Hostname == DefaultProviderRegistryHost && pt.Namespace == LegacyProviderNamespace } -// IsDefault returns true if the provider is a default hashicorp provider -func (pt Provider) IsDefault() bool { - if pt.IsZero() { - panic("called IsDefault() on zero-value addrs.Provider") - } - - return pt.Hostname == DefaultRegistryHost && pt.Namespace == "hashicorp" -} - // Equals returns true if the receiver and other provider have the same attributes. func (pt Provider) Equals(other Provider) bool { return pt == other } -// ParseRawProviderSourceString parses the source attribute and returns a provider. +// ParseProviderSource parses the source attribute and returns a provider. // This is intended primarily to parse the FQN-like strings returned by // terraform-config-inspect. // @@ -230,7 +185,7 @@ func (pt Provider) Equals(other Provider) bool { // // "name"-only format is parsed as -/name (i.e. 
legacy namespace) // requiring further identification of the namespace via Registry API -func ParseRawProviderSourceString(str string) (Provider, error) { +func ParseProviderSource(str string) (Provider, error) { var ret Provider parts, err := parseSourceStringParts(str) if err != nil { @@ -239,10 +194,14 @@ func ParseRawProviderSourceString(str string) (Provider, error) { name := parts[len(parts)-1] ret.Type = name - ret.Hostname = DefaultRegistryHost + ret.Hostname = DefaultProviderRegistryHost if len(parts) == 1 { - return NewLegacyProvider(name), nil + return Provider{ + Hostname: DefaultProviderRegistryHost, + Namespace: UnknownProviderNamespace, + Type: name, + }, nil } if len(parts) >= 2 { @@ -278,13 +237,13 @@ func ParseRawProviderSourceString(str string) (Provider, error) { ret.Hostname = hn } - if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost { + if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultProviderRegistryHost { // Legacy provider addresses must always be on the default registry // host, because the default registry host decides what actual FQN // each one maps to. return Provider{}, &ParserError{ Summary: "Invalid provider namespace", - Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".", + Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultProviderRegistryHost.ForDisplay() + ".", } } @@ -332,28 +291,52 @@ func ParseRawProviderSourceString(str string) (Provider, error) { return ret, nil } -// ParseAndInferProviderSourceString parses the source attribute and returns a provider. -// This is intended primarily to parse the FQN-like strings returned by -// terraform-config-inspect. 
-// -// The following are valid source string formats: -// name -// namespace/name -// hostname/namespace/name -// -// "name" format is assumed to be hashicorp/name -func ParseAndInferProviderSourceString(str string) (Provider, error) { - var ret Provider - parts, err := parseSourceStringParts(str) +// MustParseProviderSource is a wrapper around ParseProviderSource that panics if +// it returns an error. +func MustParseProviderSource(raw string) (Provider) { + p, err := ParseProviderSource(raw) if err != nil { - return ret, err + panic(err) } + return p +} - if len(parts) == 1 { - return NewDefaultProvider(parts[0]), nil +// ValidateProviderAddress returns error if the given address is not FQN, +// that is if it is missing any of the three components from +// hostname/namespace/name. +func ValidateProviderAddress(raw string) error { + parts, err := parseSourceStringParts(raw) + if err != nil { + return err + } + + if len(parts) != 3 { + return &ParserError{ + Summary: "Invalid provider address format", + Detail: `Expected FQN in the format "hostname/namespace/name"`, + } + } + + p, err := ParseProviderSource(raw) + if err != nil { + return err + } + + if !p.HasKnownNamespace() { + return &ParserError{ + Summary: "Unknown provider namespace", + Detail: `Expected FQN in the format "hostname/namespace/name"`, + } + } + + if !p.IsLegacy() { + return &ParserError{ + Summary: "Invalid legacy provider namespace", + Detail: `Expected FQN in the format "hostname/namespace/name"`, + } } - return ParseRawProviderSourceString(str) + return nil } func parseSourceStringParts(str string) ([]string, error) { @@ -390,16 +373,6 @@ func parseSourceStringParts(str string) ([]string, error) { return parts, nil } -// MustParseRawProviderSourceString is a wrapper around ParseRawProviderSourceString that panics if -// it returns an error. 
-func MustParseRawProviderSourceString(str string) Provider { - result, err := ParseRawProviderSourceString(str) - if err != nil { - panic(err) - } - return result -} - // ParseProviderPart processes an addrs.Provider namespace or type string // provided by an end-user, producing a normalized version if possible or // an error if the string contains invalid characters. @@ -468,15 +441,3 @@ func MustParseProviderPart(given string) string { } return result } - -// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string) -func IsProviderPartNormalized(str string) (bool, error) { - normalized, err := ParseProviderPart(str) - if err != nil { - return false, err - } - if str == normalized { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 7942c565ce..0000000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) - diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index e055952b66..ca0483711c 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,6 +1,6 @@ # go-colorable -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) [![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) [![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 1f7806fe16..416d1bbbf8 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 08cbd1e0fa..766d94603a 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 41215d7fc4..1846ad5ab4 100644 --- 
a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -452,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 95f2c6be25..05d6f74bf6 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,18 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop @@ -38,7 +42,6 @@ loop: continue } - var buf bytes.Buffer for { c, err := er.ReadByte() if err != nil { @@ -47,7 +50,6 @@ loop: if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { break } - buf.Write([]byte(string(c))) } } diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml deleted file mode 100644 index 604314dd44..0000000000 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ 
/dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... - -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 711f288085..39bbcf00f0 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,3 +1,4 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine // +build darwin freebsd openbsd netbsd dragonfly // +build !appengine diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index ff714a3761..31503226f6 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,4 +1,5 @@ -// +build appengine js nacl +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go index c5b6e0c084..bae7f9bb3d 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index bdd5c79a07..0c3acf2dc2 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -1,5 +1,5 @@ -// +build solaris -// +build !appengine +//go:build solaris && !appengine +// +build solaris,!appengine package isatty @@ -8,10 +8,9 @@ import ( ) // IsTerminal returns true if the given file descriptor is a terminal. 
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c func IsTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) return err == nil } diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 31a1ca973c..67787657fb 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,4 +1,5 @@ -// +build linux aix +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos // +build !appengine package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go index 1fa8691540..8e3c99171b 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package isatty @@ -76,7 +76,7 @@ func isCygwinPipeName(name string) bool { } // getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion // guys are using Windows XP, this is a workaround for those guys, it will also work on system from // Windows vista to 10 // see https://stackoverflow.com/a/18792477 for details diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json deleted file mode 100644 index 5ae9d96b74..0000000000 --- a/vendor/github.com/mattn/go-isatty/renovate.json +++ /dev/null @@ -1,8 
+0,0 @@ -{ - "extends": [ - "config:base" - ], - "postUpdateOptions": [ - "gomodTidy" - ] -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d2679..3bb22a9718 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -3,6 +3,7 @@ package assert import ( "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +31,8 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } } return compareEqual, false @@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
} // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} // Positive asserts that the specified element is positive @@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
} func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 0000000000..da867903e2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. +func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 0000000000..1701af2a3c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. 
+func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229a8..27e2420ed2 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07..d9ea368d0a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. 
+// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182a..7594487835 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
} // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f5..0357b2231a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) 
} @@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } subsetValue := reflect.ValueOf(subset) @@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) 
+ } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) 
} actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df2e6..59c48277ac 100644 --- 
a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83f..5bb07c89c6 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. 
not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index c5898db465..4652247b8a 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -15,6 +15,7 @@ const bufSize = 256 // xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only // be called when the vector facility is available. Implementation in asm_s390x.s. 
+// //go:noescape func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go index 44dc8e8caf..edcf163c4e 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go @@ -1,13 +1,16 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. +//go:build amd64 && gc && !purego // +build amd64,gc,!purego package field // feMul sets out = a * b. It works like feMulGeneric. +// //go:noescape func feMul(out *Element, a *Element, b *Element) // feSquare sets out = a * a. It works like feSquareGeneric. +// //go:noescape func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index c942a65904..e041da5ea3 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -136,7 +136,7 @@ func shiftRightBy2(a uint128) uint128 { // updateGeneric absorbs msg into the state.h accumulator. For each chunk m of // 128 bits of message, it computes // -// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// h₊ = (h + m) * r mod 2¹³⁰ - 5 // // If the msg length is not a multiple of TagSize, it assumes the last // incomplete chunk is the final one. 
@@ -278,8 +278,7 @@ const ( // finalize completes the modular reduction of h and computes // -// out = h + s mod 2¹²⁸ -// +// out = h + s mod 2¹²⁸ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { h0, h1, h2 := h[0], h[1], h[2] diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go index 62cc9f8470..ec95966889 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -14,6 +14,7 @@ import ( // updateVX is an assembly implementation of Poly1305 that uses vector // instructions. It must only be called if the vector facility (vx) is // available. +// //go:noescape func updateVX(state *macState, msg []byte) diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go index ebc87876e6..be342ad473 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -23,12 +23,14 @@ import ( // A Block represents an OpenPGP armored structure. // // The encoded form is: -// -----BEGIN Type----- -// Headers // -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// // where Headers is a possibly empty sequence of Key: Value lines. // // Since the armored data can be very large, this package presents a streaming diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go index 6f07582c37..5b6e16c19d 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go @@ -96,7 +96,8 @@ func (l *lineBreaker) Close() (err error) { // trailer. 
// // It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out +// +// encoding -> base64 encoder -> lineBreaker -> out type encoding struct { out io.Writer breaker *lineBreaker diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go index 84396a0896..743b35a120 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -77,8 +77,8 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err // returns the plaintext of the message. An error can result only if the // ciphertext is invalid. Users should keep in mind that this is a padding // oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// be used to break the cryptosystem. See “Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel // Bleichenbacher, Advances in Cryptology (Crypto '98), func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { s := new(big.Int).Exp(c1, priv.X, priv.P) diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 593f653008..904b57e01d 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -32,7 +32,7 @@ import ( // can get a derived key for e.g. AES-256 (which needs a 32-byte key) by // doing: // -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) // // Remember to get a good random salt. At least 8 bytes is recommended by the // RFC. 
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index bbe4494c6c..c971a99fa6 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -186,7 +186,7 @@ func smix(b []byte, r, N int, v, xy []uint32) { // For example, you can get a derived key for e.g. AES-256 (which needs a // 32-byte key) by doing: // -// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) // // The recommended parameters for interactive logins as of 2017 are N=32768, r=8 // and p=1. The parameters N, r, and p should be increased as memory latency and diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index b909471cc0..3c4d18a15c 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -8,7 +8,8 @@ // ssh-agent process using the sample server. 
// // References: -// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 +// +// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 package agent // import "golang.org/x/crypto/ssh/agent" import ( @@ -25,7 +26,6 @@ import ( "math/big" "sync" - "crypto" "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh" ) @@ -771,19 +771,53 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, return s.agent.Sign(s.pub, data) } -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { +func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) { + if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) { + return s.Sign(rand, data) + } + var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } + switch algorithm { + case ssh.KeyAlgoRSASHA256: + flags = SignatureFlagRsaSha256 + case ssh.KeyAlgoRSASHA512: + flags = SignatureFlagRsaSha512 + default: + return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm) } + return s.agent.SignWithFlags(s.pub, data, flags) } +var _ ssh.AlgorithmSigner = &agentKeyringSigner{} + +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in certs.go. 
+var certKeyAlgoNames = map[string]string{ + ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA, + ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256, + ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512, + ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA, + ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256, + ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384, + ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521, + ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256, + ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519, + ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo +} + // Calls an extension method. It is up to the agent implementation as to whether or not // any particular extension is supported and may always return an error. 
Because the // type of the response is up to the implementation, this returns the bytes of the diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go index c6eb56dd50..21bfa870fa 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -205,9 +205,9 @@ func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureF var algorithm string switch flags { case SignatureFlagRsaSha256: - algorithm = ssh.SigAlgoRSASHA2256 + algorithm = ssh.KeyAlgoRSASHA256 case SignatureFlagRsaSha512: - algorithm = ssh.SigAlgoRSASHA2512 + algorithm = ssh.KeyAlgoRSASHA512 default: return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags) } diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 6605bf6449..4600c20772 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -14,8 +14,10 @@ import ( "time" ) -// These constants from [PROTOCOL.certkeys] represent the key algorithm names -// for certificate types supported by this package. +// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear +// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. +// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't +// appear in the Signature.Format field. const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" @@ -25,14 +27,21 @@ const ( CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + + // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a + // Certificate.Type (or PublicKey.Type), but only in + // ClientConfig.HostKeyAlgorithms. 
+ CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" ) -// These constants from [PROTOCOL.certkeys] represent additional signature -// algorithm names for certificate types supported by this package. const ( - CertSigAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertSigAlgoRSASHA2256v01 = "rsa-sha2-256-cert-v01@openssh.com" - CertSigAlgoRSASHA2512v01 = "rsa-sha2-512-cert-v01@openssh.com" + // Deprecated: use CertAlgoRSAv01. + CertSigAlgoRSAv01 = CertAlgoRSAv01 + // Deprecated: use CertAlgoRSASHA256v01. + CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 + // Deprecated: use CertAlgoRSASHA512v01. + CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 ) // Certificate types distinguish between host and user @@ -431,10 +440,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() - if v, ok := authority.(AlgorithmSigner); ok { - if v.PublicKey().Type() == KeyAlgoRSA { - authority = &rsaSigner{v, SigAlgoRSASHA2512} + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. + if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) + if err != nil { + return err } + c.Signature = sig + return nil } sig, err := authority.Sign(rand, c.bytesForSigning()) @@ -445,32 +458,42 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { return nil } -// certAlgoNames includes a mapping from signature algorithms to the -// corresponding certificate signature algorithm. When a key type (such -// as ED25516) is associated with only one algorithm, the KeyAlgo -// constant is used instead of the SigAlgo. 
-var certAlgoNames = map[string]string{ - SigAlgoRSA: CertSigAlgoRSAv01, - SigAlgoRSASHA2256: CertSigAlgoRSASHA2256v01, - SigAlgoRSASHA2512: CertSigAlgoRSASHA2512v01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in agent/client.go. +var certKeyAlgoNames = map[string]string{ + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + CertAlgoDSAv01: KeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo } -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo +// certificateAlgo returns the certificate algorithms that uses the provided +// underlying signature algorithm. 
+func certificateAlgo(algo string) (certAlgo string, ok bool) { + for certName, algoName := range certKeyAlgoNames { + if algoName == algo { + return certName, true } } - panic("unknown cert algorithm") + return "", false } func (cert *Certificate) bytesForSigning() []byte { @@ -514,13 +537,13 @@ func (c *Certificate) Marshal() []byte { return result } -// Type returns the key name. It is part of the PublicKey interface. +// Type returns the certificate algorithm name. It is part of the PublicKey interface. func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] + certName, ok := certificateAlgo(c.Key.Type()) if !ok { - panic("unknown cert key type " + c.Key.Type()) + panic("unknown certificate type for key type " + c.Key.Type()) } - return algo + return certName } // Verify verifies a signature against the certificate's public diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index f8bdf4984c..770e8a6639 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -640,7 +640,7 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" // chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com // AEAD, which is described here: // -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 // // the methods here also implement padding, which RFC4253 Section 6 // also requires of stream ciphers. diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index ba8621a891..bdc356cbdf 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -113,25 +113,16 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e return c.clientAuthenticate(config) } -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. 
+// verifyHostKeySignature verifies the host key obtained in the key exchange. +// algo is the negotiated algorithm, and may be a certificate type. func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { sig, rest, ok := parseSignatureBody(result.Signature) if len(rest) > 0 || !ok { return errors.New("ssh: signature parse error") } - // For keys, underlyingAlgo is exactly algo. For certificates, - // we have to look up the underlying key algorithm that SSH - // uses to evaluate signatures. - underlyingAlgo := algo - for sigAlgo, certAlgo := range certAlgoNames { - if certAlgo == algo { - underlyingAlgo = sigAlgo - } - } - if sig.Format != underlyingAlgo { - return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, underlyingAlgo) + if a := underlyingAlgo(algo); sig.Format != a { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) } return hostKey.Verify(result.H, sig) @@ -237,11 +228,11 @@ type ClientConfig struct { // be used for the connection. If empty, a reasonable default is used. ClientVersion string - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of + // HostKeyAlgorithms lists the public key algorithms that the client will + // accept from the server for host key authentication, in order of // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + // string returned from a PublicKey.Type method may be used, or + // any of the CertAlgo and KeyAlgo constants. HostKeyAlgorithms []string // Timeout is the maximum amount of time for the TCP connection to establish. 
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index c611aeb684..409b5ea1d4 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "strings" ) type authResult int @@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { if err != nil { return err } + // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we + // advertised willingness to receive one, which we always do) or not. See + // RFC 8308, Section 2.4. + extensions := make(map[string][]byte) + if len(packet) > 0 && packet[0] == msgExtInfo { + var extInfo extInfoMsg + if err := Unmarshal(packet, &extInfo); err != nil { + return err + } + payload := extInfo.Payload + for i := uint32(0); i < extInfo.NumExtensions; i++ { + name, rest, ok := parseString(payload) + if !ok { + return parseError(msgExtInfo) + } + value, rest, ok := parseString(rest) + if !ok { + return parseError(msgExtInfo) + } + extensions[string(name)] = value + payload = rest + } + packet, err = c.transport.readPacket() + if err != nil { + return err + } + } var serviceAccept serviceAcceptMsg if err := Unmarshal(packet, &serviceAccept); err != nil { return err @@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { sessionID := c.transport.getSessionID() for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { return err } @@ -93,7 +121,7 @@ type AuthMethod interface { // If authentication is not successful, a []string of alternative // method names is returned. If the slice is nil, it will be ignored // and the previous set of possible methods will be reused. 
- auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) // method returns the RFC 4252 method name. method() string @@ -102,7 +130,7 @@ type AuthMethod interface { // "none" authentication, RFC 4252 section 5.2. type noneAuth int -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { if err := c.writePacket(Marshal(&userAuthRequestMsg{ User: user, Service: serviceSSH, @@ -122,7 +150,7 @@ func (n *noneAuth) method() string { // a function call, e.g. by prompting the user. type passwordCallback func() (password string, err error) -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type passwordAuthMsg struct { User string `sshtype:"50"` Service string @@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string { return "publickey" } -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { + keyFormat := signer.PublicKey().Type() + + // Like in sendKexInit, if the public key implements AlgorithmSigner we + // assume it supports all algorithms, otherwise only the key format one. 
+ as, ok := signer.(AlgorithmSigner) + if !ok { + return algorithmSignerWrapper{signer}, keyFormat + } + + extPayload, ok := extensions["server-sig-algs"] + if !ok { + // If there is no "server-sig-algs" extension, fall back to the key + // format algorithm. + return as, keyFormat + } + + // The server-sig-algs extension only carries underlying signature + // algorithm, but we are trying to select a protocol-level public key + // algorithm, which might be a certificate type. Extend the list of server + // supported algorithms to include the corresponding certificate algorithms. + serverAlgos := strings.Split(string(extPayload), ",") + for _, algo := range serverAlgos { + if certAlgo, ok := certificateAlgo(algo); ok { + serverAlgos = append(serverAlgos, certAlgo) + } + } + + keyAlgos := algorithmsForKeyFormat(keyFormat) + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + if err != nil { + // If there is no overlap, try the key anyway with the key format + // algorithm, to support servers that fail to list all supported + // algorithms. + return as, keyFormat + } + return as, algo +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { // Authentication is performed by sending an enquiry to test if a key is // acceptable to the remote. If the key is acceptable, the client will // attempt to authenticate with the valid key. 
If not the client will repeat @@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand } var methods []string for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) + pub := signer.PublicKey() + as, algo := pickSignatureAlgorithm(signer, extensions) + + ok, err := validateKey(pub, algo, user, c) if err != nil { return authFailure, nil, err } @@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand continue } - pub := signer.PublicKey() pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + data := buildDataSignedForAuth(session, userAuthRequestMsg{ User: user, Service: serviceSSH, Method: cb.method(), - }, []byte(pub.Type()), pubKey)) + }, algo, pubKey) + sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return authFailure, nil, err } @@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand Service: serviceSSH, Method: cb.method(), HasSig: true, - Algoname: pub.Type(), + Algoname: algo, PubKey: pubKey, Sig: sig, } @@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool { } // validateKey validates the key provided is acceptable to the server. 
-func validateKey(key PublicKey, user string, c packetConn) (bool, error) { +func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { pubKey := key.Marshal() msg := publickeyAuthMsg{ User: user, Service: serviceSSH, Method: "publickey", HasSig: false, - Algoname: key.Type(), + Algoname: algo, PubKey: pubKey, } if err := c.writePacket(Marshal(&msg)); err != nil { return false, err } - return confirmKeyAck(key, c) + return confirmKeyAck(key, algo, c) } -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { pubKey := key.Marshal() - algoname := key.Type() for { packet, err := c.readPacket() @@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil case msgUserAuthFailure: return false, nil default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) } } } @@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet // along with a list of remaining authentication methods to try next and // an error if an unexpected response was received. func handleAuthResponse(c packetConn) (authResult, []string, error) { + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) { if err := handleBannerResponse(c, packet); err != nil { return authFailure, nil, err } + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. 
+ if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + gotMsgExtInfo = true case msgUserAuthFailure: var msg userAuthFailureMsg if err := Unmarshal(packet, &msg); err != nil { @@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error { // disabling echoing (e.g. for passwords), and return all the answers. // Challenge may be called multiple times in a single session. After // successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be +// questions, for which the name and instruction messages should be // printed. RFC 4256 section 3.3 details how the UI should behave for // both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) +type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) // KeyboardInteractive returns an AuthMethod using a prompt/response // sequence controlled by the server. 
@@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string { return "keyboard-interactive" } -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type initiateMsg struct { User string `sshtype:"50"` Service string @@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } continue + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + gotMsgExtInfo = true + continue case msgUserAuthInfoRequest: // OK case msgUserAuthFailure: @@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") } - answers, err := cb(msg.User, msg.Instruction, prompts, echos) + answers, err := cb(msg.Name, msg.Instruction, prompts, echos) if err != nil { return authFailure, nil, err } @@ -497,9 +581,9 @@ type retryableAuthMethod struct { maxTries int } -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) + ok, methods, err = 
r.authMethod.auth(session, user, c, rand, extensions) if ok != authFailure || err != nil { // either success, partial success or error terminate return ok, methods, err } @@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct { target string } -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { m := &userAuthRequestMsg{ User: user, Service: serviceSSH, diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 5ae2275744..2a47a61ded 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -44,11 +44,11 @@ var preferredCiphers = []string{ // supportedKexAlgos specifies the supported key-exchange algorithms in // preference order. var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -61,21 +61,21 @@ var serverForbiddenKexAlgos = map[string]struct{}{ // preferredKexAlgos specifies the default preference for key-exchange algorithms // in preference order. var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, } // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. 
var supportedHostKeyAlgos = []string{ - CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, - CertSigAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - SigAlgoRSASHA2512, SigAlgoRSASHA2256, - SigAlgoRSA, KeyAlgoDSA, + KeyAlgoRSASHA512, KeyAlgoRSASHA256, + KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, } @@ -89,23 +89,33 @@ var supportedMACs = []string{ var supportedCompressions = []string{compressionNone} -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. +// hashFuncs keeps the mapping of supported signature algorithms to their +// respective hashes needed for signing and verification. var hashFuncs = map[string]crypto.Hash{ - SigAlgoRSA: crypto.SHA1, - SigAlgoRSASHA2256: crypto.SHA256, - SigAlgoRSASHA2512: crypto.SHA512, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertSigAlgoRSAv01: crypto.SHA1, - CertSigAlgoRSASHA2256v01: crypto.SHA256, - CertSigAlgoRSASHA2512v01: crypto.SHA512, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, + KeyAlgoRSA: crypto.SHA1, + KeyAlgoRSASHA256: crypto.SHA256, + KeyAlgoRSASHA512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + // KeyAlgoED25519 doesn't pre-hash. + KeyAlgoSKECDSA256: crypto.SHA256, + KeyAlgoSKED25519: crypto.SHA256, +} + +// algorithmsForKeyFormat returns the supported signature algorithms for a given +// public key format (PublicKey.Type), in order of preference. See RFC 8332, +// Section 2. See also the note in sendKexInit on backwards compatibility. 
+func algorithmsForKeyFormat(keyFormat string) []string { + switch keyFormat { + case KeyAlgoRSA: + return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} + case CertAlgoRSAv01: + return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} + default: + return []string{keyFormat} + } } // unexpectedMessageError results when the SSH message that we received didn't @@ -152,6 +162,11 @@ func (a *directionAlgorithms) rekeyBytes() int64 { return 1 << 30 } +var aeadCiphers = map[string]bool{ + gcmCipherID: true, + chacha20Poly1305ID: true, +} + type algorithms struct { kex string hostKey string @@ -187,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs return } - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return + if !aeadCiphers[ctos.Cipher] { + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } } - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return + if !aeadCiphers[stoc.Cipher] { + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } } ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) @@ -278,8 +297,9 @@ func (c *Config) SetDefaults() { } // buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { +// possession of a private key. See RFC 4252, section 7. algo is the advertised +// algorithm, and may be a certificate type. 
+func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { data := struct { Session []byte Type byte @@ -287,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK Service string Method string Sign bool - Algo []byte + Algo string PubKey []byte }{ sessionID, diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index 67b7322c05..f6bff60dc7 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -12,8 +12,9 @@ the multiplexed nature of SSH is exposed to users that wish to support others. References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 05ad49c364..653dc4d2cf 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -455,21 +455,38 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) - if len(t.hostKeys) > 0 { + isServer := len(t.hostKeys) > 0 + if isServer { for _, k := range t.hostKeys { - algo := k.PublicKey().Type() - switch algo { - case KeyAlgoRSA: - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{SigAlgoRSASHA2512, SigAlgoRSASHA2256, SigAlgoRSA}...) 
- case CertAlgoRSAv01: - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, CertSigAlgoRSAv01}...) - default: - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + // If k is an AlgorithmSigner, presume it supports all signature algorithms + // associated with the key format. (Ideally AlgorithmSigner would have a + // method to advertise supported algorithms, but it doesn't. This means that + // adding support for a new algorithm is a breaking change, as we will + // immediately negotiate it even if existing implementations don't support + // it. If that ever happens, we'll have to figure something out.) + // If k is not an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign can't pick + // a different default.) + keyFormat := k.PublicKey().Type() + if _, ok := k.(AlgorithmSigner); ok { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) + } else { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + } } + packet := Marshal(msg) // writePacket destroys the contents, so save a copy. 
@@ -589,9 +606,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { var result *kexResult if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) + result, err = t.server(kex, &magics) } else { - result, err = t.client(kex, t.algorithms, &magics) + result, err = t.client(kex, &magics) } if err != nil { @@ -618,33 +635,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return nil } -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - kt := k.PublicKey().Type() - if kt == algs.hostKey { - hostKey = k - } else if signer, ok := k.(AlgorithmSigner); ok { - // Some signature algorithms don't show up as key types - // so we have to manually check for a compatible host key. - switch kt { - case KeyAlgoRSA: - if algs.hostKey == SigAlgoRSASHA2256 || algs.hostKey == SigAlgoRSASHA2512 { - hostKey = &rsaSigner{signer, algs.hostKey} - } - case CertAlgoRSAv01: - if algs.hostKey == CertSigAlgoRSASHA2256v01 || algs.hostKey == CertSigAlgoRSASHA2512v01 { - hostKey = &rsaSigner{signer, certToPrivAlgo(algs.hostKey)} - } +// algorithmSignerWrapper is an AlgorithmSigner that only supports the default +// key format algorithm. +// +// This is technically a violation of the AlgorithmSigner interface, but it +// should be unreachable given where we use this. Anyway, at least it returns an +// error instead of panicing or producing an incorrect signature. 
+type algorithmSignerWrapper struct { + Signer +} + +func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != underlyingAlgo(a.PublicKey().Type()) { + return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") + } + return a.Sign(rand, data) +} + +func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { + for _, k := range hostKeys { + if algo == k.PublicKey().Type() { + return algorithmSignerWrapper{k} + } + k, ok := k.(AlgorithmSigner) + if !ok { + continue + } + for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { + if algo == a { + return k } } } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + if hostKey == nil { + return nil, errors.New("ssh: internal error: negotiated unsupported signature type") + } - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) return r, err } -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { +func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { result, err := kex.Client(t.conn, t.config.Rand, magics) if err != nil { return nil, err @@ -655,7 +691,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics * return nil, err } - if err := verifyHostKeySignature(hostKey, algs.hostKey, result); err != nil { + if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 766e929397..927a90cd46 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ 
-20,12 +20,14 @@ import ( ) const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" // For the following kex only the client half contains a production // ready implementation. The server half only consists of a minimal @@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) { // kexAlgorithm abstracts different key exchange algorithms. type kexAlgorithm interface { // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + // with a hostkey. algo is the negotiated algorithm, and may + // be a certificate type. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) // Client runs the client-side key agreement. Caller is // responsible for verifying the host key signature. @@ -86,6 +89,7 @@ type kexAlgorithm interface { // dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. 
type dhGroup struct { g, p, pMinus1 *big.Int + hashFunc crypto.Hash } func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { @@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, } func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - var x *big.Int for { var err error @@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha return nil, err } - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, kexDHReply.HostKey) writeInt(h, X) @@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: kexDHReply.HostKey, Signature: kexDHReply.Signature, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, nil } -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha hostKeyBytes := priv.PublicKey().Marshal() - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, hostKeyBytes) writeInt(h, kexDHInit.X) @@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } @@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: hostKeyBytes, Signature: sig, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, err } @@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { return true } -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return nil, err @@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p }, nil } +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + var kexAlgoMap = map[string]kexAlgorithm{} func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. + // This is the group called diffie-hellman-group1-sha1 in + // RFC 4253 and Oakley Group 2 in RFC 2409. 
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA1, } - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. + // This are the groups called diffie-hellman-group14-sha1 and + // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, + // and Oakley Group 14 in RFC 3526. p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + group14 := &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), } + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA1, + } + kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA256, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] 
= &curve25519sha256{} kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} } -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +// curve25519sha256 implements the curve25519-sha256 (formerly known as +// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. type curve25519sha256 struct{} type curve25519KeyPair struct { @@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh }, nil } -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh H := h.Sum(nil) - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh // diffie-hellman-group-exchange-sha256 key agreement protocols, // as described in RFC 4419 type dhGEXSHA struct { - g, p *big.Int hashFunc crypto.Hash } @@ -563,14 +586,7 @@ const ( dhGroupExchangeMaximumBits = 8192 ) -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func 
(gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - var kexDHGexGroup kexDHGexGroupMsg - if err = Unmarshal(packet, &kexDHGexGroup); err != nil { + var msg kexDHGexGroupMsg + if err = Unmarshal(packet, &msg); err != nil { return nil, err } // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) + if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) } - gex.p = kexDHGexGroup.P - gex.g = kexDHGexGroup.G - - // Check if g is safe by verifing that g > 1 and g < p - 1 - one := big.NewInt(1) - var pMinusOne = &big.Int{} - pMinusOne.Sub(gex.p, one) - if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { + // Check if g is safe by verifying that 1 < g < p-1 + pMinusOne := new(big.Int).Sub(msg.P, bigOne) + if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: server provided gex g is not safe") } // Send GexInit - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(msg.P, 1) x, err := rand.Int(randSource, pHalf) if err != nil { return nil, err } - X := new(big.Int).Exp(gex.g, x, gex.p) + X := new(big.Int).Exp(msg.G, x, msg.P) kexDHGexInit := kexDHGexInitMsg{ X: X, } @@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) - if err != nil { - 
return nil, err + if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - // Check if k is safe by verifing that k > 1 and k < p - 1 - if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { + // Check if k is safe by verifying that k > 1 and k < p - 1 + if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: derived k is not safe") } @@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, msg.P) + writeInt(h, msg.G) writeInt(h, X) writeInt(h, kexDHGexReply.Y) K := make([]byte, intLength(kInt)) @@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // // This is a minimal implementation to satisfy the automated tests. 
-func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { @@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - // Send GexGroup // This is the group called diffie-hellman-group14-sha1 in RFC // 4253 and Oakley Group 14 in RFC 3526. 
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) + g := big.NewInt(2) - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, + msg := &kexDHGexGroupMsg{ + P: p, + G: g, } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + if err := c.writePacket(Marshal(msg)); err != nil { return nil, err } @@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(p, 1) y, err := rand.Int(randSource, pHalf) if err != nil { return } + Y := new(big.Int).Exp(g, y, p) - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err + pMinusOne := new(big.Int).Sub(p, bigOne) + if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) hostKeyBytes := priv.PublicKey().Marshal() @@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, p) + writeInt(h, g) writeInt(h, kexDHGexInit.X) writeInt(h, Y) @@ -758,7 +750,7 @@ 
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index c67d3a31cb..1c7de1a6dd 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -30,8 +30,9 @@ import ( "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) -// These constants represent the algorithm names for key types supported by this -// package. +// Public key algorithms names. These values can appear in PublicKey.Type, +// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner +// arguments. const ( KeyAlgoRSA = "ssh-rsa" KeyAlgoDSA = "ssh-dss" @@ -41,16 +42,21 @@ const ( KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" KeyAlgoED25519 = "ssh-ed25519" KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not + // public key formats, so they can't appear as a PublicKey.Type. The + // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + KeyAlgoRSASHA256 = "rsa-sha2-256" + KeyAlgoRSASHA512 = "rsa-sha2-512" ) -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" + // Deprecated: use KeyAlgoRSA. + SigAlgoRSA = KeyAlgoRSA + // Deprecated: use KeyAlgoRSASHA256. + SigAlgoRSASHA2256 = KeyAlgoRSASHA256 + // Deprecated: use KeyAlgoRSASHA512. 
+ SigAlgoRSASHA2512 = KeyAlgoRSASHA512 ) // parsePubKey parses a public key of the given algorithm. @@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err case KeyAlgoSKED25519: return parseSKEd25519(in) case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) + cert, err := parseCert(in, certKeyAlgoNames[algo]) if err != nil { return nil, nil, err } @@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte { return b.Bytes() } -// PublicKey is an abstraction of different types of public keys. +// PublicKey represents a public key using an unspecified algorithm. +// +// Some PublicKeys provided by this package also implement CryptoPublicKey. type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". + // Type returns the key format name, e.g. "ssh-rsa". Type() string - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. + // Marshal returns the serialized key data in SSH wire format, with the name + // prefix. To unmarshal the returned data, use the ParsePublicKey function. Marshal() []byte - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. + // Verify that sig is a signature on the given data using this key. This + // method will hash the data appropriately first. sig.Format is allowed to + // be any signature algorithm compatible with the key type, the caller + // should check if it has more stringent requirements. Verify(data []byte, sig *Signature) error } @@ -311,25 +320,32 @@ type CryptoPublicKey interface { } // A Signer can create signatures that verify against a public key. 
+// +// Some Signers provided by this package also implement AlgorithmSigner. type Signer interface { - // PublicKey returns an associated PublicKey instance. + // PublicKey returns the associated PublicKey. PublicKey() PublicKey - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. + // Sign returns a signature for the given data. This method will hash the + // data appropriately first. The signature algorithm is expected to match + // the key format returned by the PublicKey.Type method (and not to be any + // alternative algorithm supported by the key format). Sign(rand io.Reader, data []byte) (*Signature, error) } -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. +// An AlgorithmSigner is a Signer that also supports specifying an algorithm to +// use for signing. +// +// An AlgorithmSigner can't advertise the algorithms it supports, so it should +// be prepared to be invoked with every algorithm supported by the public key +// format. type AlgorithmSigner interface { Signer - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. + // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired + // signing algorithm. Callers may pass an empty string for the algorithm in + // which case the AlgorithmSigner will use a default algorithm. This default + // doesn't currently control any behavior in this package. 
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) } @@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte { } func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: + supportedAlgos := algorithmsForKeyFormat(r.Type()) + if !contains(supportedAlgos, sig.Format) { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } + hash := hashFuncs[sig.Format] h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := crypto.SHA1.New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey { } func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") + return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) } func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { @@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) } - h := crypto.SHA1.New() + h := hashFuncs[k.PublicKey().Type()].New() h.Write(data) digest := h.Sum(nil) r, s, err := dsa.Sign(rand, k.PrivateKey, digest) @@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool { return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() } -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - 
bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - // parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { var w struct { @@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("invalid size %d for Ed25519 public key", l) } - h := sha256.New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -939,15 +936,6 @@ func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { return &dsaPrivateKey{key}, nil } -type rsaSigner struct { - AlgorithmSigner - defaultAlgorithm string -} - -func (s *rsaSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.AlgorithmSigner.SignWithAlgorithm(rand, data, s.defaultAlgorithm) -} - type wrappedSigner struct { signer crypto.Signer pubKey PublicKey @@ -970,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey { } func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") + return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) } func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := 
s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } + if algorithm == "" { + algorithm = s.pubKey.Type() + } - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } + supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) + if !contains(supportedAlgos, algorithm) { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } + hashFunc := hashFuncs[algorithm] var digest []byte if hashFunc != 0 { h := hashFunc.New() diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index ac41a4168b..19bc67c464 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -141,6 +141,14 @@ type serviceAcceptMsg struct { Service string `sshtype:"6"` } +// See RFC 8308, section 2.3 +const msgExtInfo = 7 + +type extInfoMsg struct { + NumExtensions uint32 `sshtype:"7"` + Payload []byte `ssh:"rest"` +} + // See RFC 4252, section 5. 
const msgUserAuthRequest = 50 @@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60 const msgUserAuthInfoResponse = 61 type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` + Name string `sshtype:"60"` + Instruction string + Language string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` } // See RFC 4254, section 5.1. @@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) { msg = new(serviceRequestMsg) case msgServiceAccept: msg = new(serviceAcceptMsg) + case msgExtInfo: + msg = new(extInfoMsg) case msgKexInit: msg = new(kexInitMsg) case msgKexDHInit: @@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{ msgDisconnect: "disconnectMsg", msgServiceRequest: "serviceRequestMsg", msgServiceAccept: "serviceAcceptMsg", + msgExtInfo: "extInfoMsg", msgKexInit: "kexInitMsg", msgKexDHInit: "kexDHInitMsg", msgKexDHReply: "kexDHReplyMsg", diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 8e013651cb..70045bdfd8 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -120,7 +120,7 @@ type ServerConfig struct { } // AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server +// key exists with the same public key format, it is replaced. Each server // config must have at least one host key. func (s *ServerConfig) AddHostKey(key Signer) { for i, k := range s.hostKeys { @@ -212,9 +212,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } // signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) +// and serializes the result in SSH wire format. 
algo is the negotiate +// algorithm and may be a certificate type. +func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { + sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return nil, err } @@ -284,7 +285,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { - case SigAlgoRSA, SigAlgoRSASHA2256, SigAlgoRSASHA2512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: return true } @@ -553,6 +554,7 @@ userAuthLoop: if !ok || len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } + // Ensure the public key algo and signature algo // are supported. 
Compare the private key // algorithm name that corresponds to algo with @@ -562,7 +564,12 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + if underlyingAlgo(algo) != sig.Format { + authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) + break + } + + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) if err := pubKey.Verify(signedData, sig); err != nil { return nil, err @@ -634,7 +641,7 @@ userAuthLoop: authFailures++ if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attemps, don't bother sending the + // If we have hit the max attempts, don't bother sending the // final SSH_MSG_USERAUTH_FAILURE message, since there are // no more authentication methods which can be attempted, // and this message may cause the client to re-attempt @@ -694,7 +701,7 @@ type sshClientKeyboardInteractive struct { *connection } -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { +func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { if len(questions) != len(echos) { return nil, errors.New("ssh: echos and questions must have equal length") } @@ -706,6 +713,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest } if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Name: name, Instruction: instruction, NumPrompts: uint32(len(questions)), Prompts: prompts, diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go index d3321f6b78..eca31a22d5 100644 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -85,6 +85,7 @@ const ( IXANY = 39 
IXOFF = 40 IMAXBEL = 41 + IUTF8 = 42 // RFC 8160 ISIG = 50 ICANON = 51 XCASE = 52 diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index 49ddc2e7de..acf5a21bbb 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -238,15 +238,19 @@ var ( // (to setup server->client keys) or clientKeys (for client->server keys). func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] iv := make([]byte, cipherMode.ivSize) key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) generateKeyMaterial(iv, d.ivTag, kex) generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) + + var macKey []byte + if !aeadCiphers[algs.Cipher] { + macMode := macModes[algs.MAC] + macKey = make([]byte, macMode.keySize) + generateKeyMaterial(macKey, d.macKeyTag, kex) + } return cipherModes[algs.Cipher].create(key, iv, macKey, algs) } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ea9836d28b..de6d41c238 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -801,16 +801,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. 
// // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using @@ -827,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -1219,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. @@ -1229,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1252,6 +1271,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. + hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) return err } diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d..18e530fc90 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df..244f4b081d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0956b500c1..45d7bd145e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ 
-53,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -449,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -522,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -550,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: 
stat, + rst: !frame.StreamEnded(), }) return false } @@ -931,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -946,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -1062,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? 
- s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1088,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1229,10 +1230,6 @@ func (t *http2Server) Close() { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1254,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1273,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. 
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index 6f30c8016e..9873da268d 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -122,8 +122,11 @@ func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, err } matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) case *v3rbacpb.Permission_Metadata: - // Not supported in gRPC RBAC currently - a permission typed as - // Metadata in the initial config will be a no-op. + // Never matches - so no-op if not inverted, always match if + // inverted. + if permission.GetMetadata().GetInvert() { // Test metadata being no-op and also metadata with invert always matching + matchers = append(matchers, &alwaysMatcher{}) + } case *v3rbacpb.Permission_RequestedServerName: // Not supported in gRPC RBAC currently - a permission typed as // requested server name in the initial config will be a no-op. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 96431a058b..65de84b300 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1801,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. 
-// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1818,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. 
However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1833,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index be51405358..236fc17ec3 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -1381,8 +1381,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. 
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index f17192e6a3..5bc03f9b36 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.46.2" +const Version = "1.47.0" diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 0be796c47b..d057ed66a5 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -270,6 +272,52 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } +func outlierDetectionToConfig(od *xdsresource.OutlierDetection) *outlierdetection.LBConfig { // Already validated - no need to return error + if od == nil { + // "If the outlier_detection field is not set in the Cluster message, a + // "no-op" outlier_detection config will be generated, with interval set + // to the maximum possible value and all other fields unset." 
- A50 + return &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + } + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *outlierdetection.SuccessRateEjection + if od.EnforcingSuccessRate != 0 { + sre = &outlierdetection.SuccessRateEjection{ + StdevFactor: od.SuccessRateStdevFactor, + EnforcementPercentage: od.EnforcingSuccessRate, + MinimumHosts: od.SuccessRateMinimumHosts, + RequestVolume: od.SuccessRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." - A50 + var fpe *outlierdetection.FailurePercentageEjection + if od.EnforcingFailurePercentage != 0 { + fpe = &outlierdetection.FailurePercentageEjection{ + Threshold: od.FailurePercentageThreshold, + EnforcementPercentage: od.EnforcingFailurePercentage, + MinimumHosts: od.FailurePercentageMinimumHosts, + RequestVolume: od.FailurePercentageRequestVolume, + } + } + + return &outlierdetection.LBConfig{ + Interval: od.Interval, + BaseEjectionTime: od.BaseEjectionTime, + MaxEjectionTime: od.MaxEjectionTime, + MaxEjectionPercent: od.MaxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } +} + // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying cluster_resolver balancer. 
func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { @@ -342,6 +390,9 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { default: b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) } + if envconfig.XDSOutlierDetection { + dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) + } } lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: dms, diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go index a10d8d772f..234511a45d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -24,7 +24,12 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") +const maxDepth = 16 + +var ( + errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") + errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth") +) // clusterHandlerUpdate wraps the information received from the registered CDS // watcher. A non-nil error is propagated to the underlying cluster_resolver @@ -54,9 +59,10 @@ type clusterHandler struct { // A mutex to protect entire tree of clusters. clusterMutex sync.Mutex - root *clusterNode rootClusterName string + createdClusters map[string]*clusterNode + // A way to ping CDS Balancer about any updates or errors to a Node in the // tree. This will either get called from this handler constructing an // update or from a child with an error. 
Capacity of one as the only update @@ -66,39 +72,48 @@ type clusterHandler struct { func newClusterHandler(parent *cdsBalancer) *clusterHandler { return &clusterHandler{ - parent: parent, - updateChannel: make(chan clusterHandlerUpdate, 1), + parent: parent, + updateChannel: make(chan clusterHandlerUpdate, 1), + createdClusters: make(map[string]*clusterNode), } } func (ch *clusterHandler) updateRootCluster(rootClusterName string) { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { // Construct a root node on first update. - ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) ch.rootClusterName = rootClusterName return } // Check if root cluster was changed. If it was, delete old one and start // new one, if not do nothing. if rootClusterName != ch.rootClusterName { - ch.root.delete() - ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + ch.createdClusters[ch.rootClusterName].delete() + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) ch.rootClusterName = rootClusterName } } // This function tries to construct a cluster update to send to CDS. func (ch *clusterHandler) constructClusterUpdate() { - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { // If root is nil, this handler is closed, ignore the update. return } - clusterUpdate, err := ch.root.constructClusterUpdate() + clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool)) if err != nil { - // If there was an error received no op, as this simply means one of the - // children hasn't received an update yet. + // If there was an error received no op, as this can mean one of the + // children hasn't received an update yet, or the graph continued to + // stay in an error state. 
If the graph continues to stay in an error + // state, no new error needs to be written to the update buffer as that + // would be redundant information. + return + } + if clusterUpdate == nil { + // This means that there was an aggregated cluster with no EDS or DNS as + // leaf nodes. No update to be written. return } // For a ClusterUpdate, the only update CDS cares about is the most @@ -109,8 +124,8 @@ func (ch *clusterHandler) constructClusterUpdate() { default: } ch.updateChannel <- clusterHandlerUpdate{ - securityCfg: ch.root.clusterUpdate.SecurityCfg, - lbPolicy: ch.root.clusterUpdate.LBPolicy, + securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, + lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, updates: clusterUpdate, } } @@ -120,11 +135,10 @@ func (ch *clusterHandler) constructClusterUpdate() { func (ch *clusterHandler) close() { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { return } - ch.root.delete() - ch.root = nil + ch.createdClusters[ch.rootClusterName].delete() ch.rootClusterName = "" } @@ -136,7 +150,7 @@ type clusterNode struct { cancelFunc func() // A list of children, as the Node can be an aggregate Cluster. - children []*clusterNode + children []string // A ClusterUpdate in order to build a list of cluster updates for CDS to // send down to child XdsClusterResolverLoadBalancingPolicy. @@ -149,13 +163,30 @@ type clusterNode struct { receivedUpdate bool clusterHandler *clusterHandler + + depth int32 + refCount int32 + + // maxDepthErr is set if this cluster node is an aggregate cluster and has a + // child that causes the graph to exceed the maximum depth allowed. This is + // used to show a cluster graph as being in an error state when it constructs + // a cluster update. + maxDepthErr error } // CreateClusterNode creates a cluster node from a given clusterName. This will // also start the watch for that cluster. 
-func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler) *clusterNode { +func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) { + // If the cluster has already been created, simply return, which ignores + // duplicates. + if topLevelHandler.createdClusters[clusterName] != nil { + topLevelHandler.createdClusters[clusterName].refCount++ + return + } c := &clusterNode{ clusterHandler: topLevelHandler, + depth: depth, + refCount: 1, } // Communicate with the xds client here. topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) @@ -164,25 +195,43 @@ func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLev topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) cancel() } - return c + topLevelHandler.createdClusters[clusterName] = c } // This function cancels the cluster watch on the cluster and all of it's // children. func (c *clusterNode) delete() { - c.cancelFunc() - for _, child := range c.children { - child.delete() + c.refCount-- + if c.refCount == 0 { + c.cancelFunc() + delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName) + for _, child := range c.children { + if c.clusterHandler.createdClusters[child] != nil { + c.clusterHandler.createdClusters[child].delete() + } + } } } // Construct cluster update (potentially a list of ClusterUpdates) for a node. -func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, error) { +func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) { // If the cluster has not yet received an update, the cluster update is not // yet ready. if !c.receivedUpdate { return nil, errNotReceivedUpdate } + if c.maxDepthErr != nil { + return nil, c.maxDepthErr + } + // Ignore duplicates. 
It's ok to ignore duplicates because the second + // occurrence of a cluster will never be used. I.e. in [C, D, C], the second + // C will never be used (the only way to fall back to lower priority D is if + // C is down, which means second C will never be chosen). Thus, [C, D, C] is + // logically equivalent to [C, D]. + if clustersSeen[c.clusterUpdate.ClusterName] { + return []xdsresource.ClusterUpdate{}, nil + } + clustersSeen[c.clusterUpdate.ClusterName] = true // Base case - LogicalDNS or EDS. Both of these cluster types will be tied // to a single ClusterUpdate. @@ -194,7 +243,7 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, err // it's children. var childrenUpdates []xdsresource.ClusterUpdate for _, child := range c.children { - childUpdateList, err := child.constructClusterUpdate() + childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen) if err != nil { return nil, err } @@ -219,6 +268,8 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er default: } c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} + c.receivedUpdate = false + c.maxDepthErr = nil return } @@ -233,9 +284,10 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er // cluster. if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { for _, child := range c.children { - child.delete() + c.clusterHandler.createdClusters[child].delete() } c.children = nil + c.maxDepthErr = nil // This is an update in the one leaf node, should try to send an update // to the parent CDS balancer. // @@ -248,6 +300,22 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er } // Aggregate cluster handling. 
+ if len(clusterUpdate.PrioritizedClusterNames) >= 1 { + if c.depth == maxDepth-1 { + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth} + c.children = []string{} + c.maxDepthErr = errExceedsMaxDepth + return + } + } + newChildren := make(map[string]bool) for _, childName := range clusterUpdate.PrioritizedClusterNames { newChildren[childName] = true @@ -261,59 +329,42 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er // the update to build (ex. if a child is created and a watch is started, // that child hasn't received an update yet due to the mutex lock on this // callback). - var createdChild, deletedChild bool + var createdChild bool // This map will represent the current children of the cluster. It will be // first added to in order to represent the new children. It will then have - // any children deleted that are no longer present. Then, from the cluster - // update received, will be used to construct the new child list. - mapCurrentChildren := make(map[string]*clusterNode) + // any children deleted that are no longer present. + mapCurrentChildren := make(map[string]bool) for _, child := range c.children { - mapCurrentChildren[child.clusterUpdate.ClusterName] = child + mapCurrentChildren[child] = true } // Add and construct any new child nodes. for child := range newChildren { if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { - createdChild = true - mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler) + createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1) } } // Delete any child nodes no longer in the aggregate cluster's children. 
for child := range mapCurrentChildren { if _, stillAChild := newChildren[child]; !stillAChild { - deletedChild = true - mapCurrentChildren[child].delete() + c.clusterHandler.createdClusters[child].delete() delete(mapCurrentChildren, child) } } - // The order of the children list matters, so use the clusterUpdate from - // xdsclient as the ordering, and use that logical ordering for the new - // children list. This will be a mixture of child nodes which are all - // already constructed in the mapCurrentChildrenMap. - var children = make([]*clusterNode, 0, len(clusterUpdate.PrioritizedClusterNames)) - - for _, orderedChild := range clusterUpdate.PrioritizedClusterNames { - // The cluster's already have watches started for them in xds client, so - // you can use these pointers to construct the new children list, you - // just have to put them in the correct order using the original cluster - // update. - currentChild := mapCurrentChildren[orderedChild] - children = append(children, currentChild) - } - - c.children = children + c.children = clusterUpdate.PrioritizedClusterNames + c.maxDepthErr = nil // If the cluster is an aggregate cluster, if this callback created any new // child cluster nodes, then there's no possibility for a full cluster // update to successfully build, as those created children will not have - // received an update yet. However, if there was simply a child deleted, - // then there is a possibility that it will have a full cluster update to - // build and also will have a changed overall cluster update from the - // deleted child. - if deletedChild && !createdChild { + // received an update yet. Even if this update did not delete a child, there + // is still a possibility for the cluster update to build, as the aggregate + // cluster can ignore duplicated children and thus the update can fill out + // the full cluster update tree. 
+ if !createdChild { c.clusterHandler.constructClusterUpdate() } } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go index 363afd03ab..26e2812d2f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -102,6 +103,9 @@ type DiscoveryMechanism struct { // DNSHostname is the DNS name to resolve in "host:port" form. For type // LOGICAL_DNS only. DNSHostname string `json:"dnsHostname,omitempty"` + // OutlierDetection is the Outlier Detection LB configuration for this + // priority. + OutlierDetection *outlierdetection.LBConfig `json:"outlierDetection,omitempty"` } // Equal returns whether the DiscoveryMechanism is the same with the parameter. @@ -117,6 +121,8 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false + case !dm.OutlierDetection.EqualIgnoringChildPolicy(b.OutlierDetection): + return false } if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go new file mode 100644 index 0000000000..da83112631 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go @@ -0,0 +1,182 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package outlierdetection implements a balancer that implements +// Outlier Detection. +package outlierdetection + +import ( + "time" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// SuccessRateEjection is parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and ejects +// individual endpoints whose success rates are statistical outliers. +type SuccessRateEjection struct { + // StddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor uint32 `json:"stdevFactor,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. 
+ EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the number of hosts in a cluster that must have enough + // request volume to detect success rate outliers. If the number of hosts is + // less than this setting, outlier detection via success rate statistics is + // not performed for any host in the cluster. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // include this host in success rate based outlier detection. If the volume + // is lower than this setting, outlier detection via success rate statistics + // is not performed for that host. Defaults to 100. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// Equal returns whether the SuccessRateEjection is the same with the parameter. +func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { + if sre == nil && sre2 == nil { + return true + } + if (sre != nil) != (sre2 != nil) { + return false + } + if sre.StdevFactor != sre2.StdevFactor { + return false + } + if sre.EnforcementPercentage != sre2.EnforcementPercentage { + return false + } + if sre.MinimumHosts != sre2.MinimumHosts { + return false + } + return sre.RequestVolume == sre2.RequestVolume +} + +// FailurePercentageEjection is parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type FailurePercentageEjection struct { + // Threshold is the failure percentage to use when determining failure + // percentage-based outlier detection. If the failure percentage of a given + // host is greater than or equal to this value, it will be ejected. Defaults + // to 85. 
+ Threshold uint32 `json:"threshold,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the minimum number of hosts in a cluster in order to + // perform failure percentage-based ejection. If the total number of hosts + // in the cluster is less than this value, failure percentage-based ejection + // will not be performed. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // perform failure percentage-based ejection for this host. If the volume is + // lower than this setting, failure percentage-based ejection will not be + // performed for this host. Defaults to 50. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// Equal returns whether the FailurePercentageEjection is the same with the +// parameter. +func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { + if fpe == nil && fpe2 == nil { + return true + } + if (fpe != nil) != (fpe2 != nil) { + return false + } + if fpe.Threshold != fpe2.Threshold { + return false + } + if fpe.EnforcementPercentage != fpe2.EnforcementPercentage { + return false + } + if fpe.MinimumHosts != fpe2.MinimumHosts { + return false + } + return fpe.RequestVolume == fpe2.RequestVolume +} + +// LBConfig is the config for the outlier detection balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // Interval is the time interval between ejection analysis sweeps. This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. 
+ Interval time.Duration `json:"interval,omitempty"` + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. + BaseEjectionTime time.Duration `json:"baseEjectionTime,omitempty"` + // MaxEjectionTime is the maximum time that an address is ejected for. If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime time.Duration `json:"maxEjectionTime,omitempty"` + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 `json:"maxEjectionPercent,omitempty"` + // SuccessRateEjection is the parameters for the success rate ejection + // algorithm. If set, success rate ejections will be performed. + SuccessRateEjection *SuccessRateEjection `json:"successRateEjection,omitempty"` + // FailurePercentageEjection is the parameters for the failure percentage + // algorithm. If set, failure rate ejections will be performed. + FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// EqualIgnoringChildPolicy returns whether the LBConfig is same with the +// parameter outside of the child policy, only comparing the Outlier Detection +// specific configuration. 
+func (lbc *LBConfig) EqualIgnoringChildPolicy(lbc2 *LBConfig) bool { + if lbc == nil && lbc2 == nil { + return true + } + if (lbc != nil) != (lbc2 != nil) { + return false + } + if lbc.Interval != lbc2.Interval { + return false + } + if lbc.BaseEjectionTime != lbc2.BaseEjectionTime { + return false + } + if lbc.MaxEjectionTime != lbc2.MaxEjectionTime { + return false + } + if lbc.MaxEjectionPercent != lbc2.MaxEjectionPercent { + return false + } + if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { + return false + } + return lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 98fd0672af..672f10122f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -100,12 +100,6 @@ type priorityBalancer struct { childToPriority map[string]int // children is a map from child name to sub-balancers. children map[string]*childBalancer - // The timer to give a priority some time to connect. And if the priority - // doesn't go into Ready/Failure, the next priority will be started. - // - // One timer is enough because there can be at most one priority in init - // state. - priorityInitTimer *timerWrapper } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { @@ -176,7 +170,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err } // Sync the states of all children to the new updated priorities. This // include starting/stopping child balancers when necessary. - b.syncPriority() + b.syncPriority(true) return nil } @@ -198,27 +192,17 @@ func (b *priorityBalancer) Close() { // Clear states of the current child in use, so if there's a race in picker // update, it will be dropped. 
b.childInUse = "" - b.stopPriorityInitTimer() + // Stop the child policies, this is necessary to stop the init timers in the + // children. + for _, child := range b.children { + child.stop() + } } func (b *priorityBalancer) ExitIdle() { b.bg.ExitIdle() } -// stopPriorityInitTimer stops the priorityInitTimer if it's not nil, and set it -// to nil. -// -// Caller must hold b.mu. -func (b *priorityBalancer) stopPriorityInitTimer() { - timerW := b.priorityInitTimer - if timerW == nil { - return - } - b.priorityInitTimer = nil - timerW.stopped = true - timerW.timer.Stop() -} - // UpdateState implements balancergroup.BalancerStateAggregator interface. The // balancer group sends new connectivity state and picker here. func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go index 600705da01..c00a56b8f9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go @@ -19,6 +19,8 @@ package priority import ( + "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" @@ -36,7 +38,16 @@ type childBalancer struct { rState resolver.State started bool - state balancer.State + // This is set when the child reports TransientFailure, and unset when it + // reports Ready or Idle. It is used to decide whether the failover timer + // should start when the child is transitioning into Connecting. The timer + // will be restarted if the child has not reported TF more recently than it + // reported Ready or Idle. + reportedTF bool + state balancer.State + // The timer to give a priority some time to connect. And if the priority + // doesn't go into Ready/Failure, the next priority will be started. 
+ initTimer *timerWrapper } // newChildBalancer creates a child balancer place holder, but doesn't @@ -79,6 +90,7 @@ func (cb *childBalancer) start() { } cb.started = true cb.parent.bg.Add(cb.name, cb.bb) + cb.startInitTimer() } // sendUpdate sends the addresses and config to the child balancer. @@ -103,10 +115,46 @@ func (cb *childBalancer) stop() { if !cb.started { return } + cb.stopInitTimer() cb.parent.bg.Remove(cb.name) cb.started = false cb.state = balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), } + // Clear child.reportedTF, so that if this child is started later, it will + // be given time to connect. + cb.reportedTF = false +} + +func (cb *childBalancer) startInitTimer() { + if cb.initTimer != nil { + return + } + // Need this local variable to capture timerW in the AfterFunc closure + // to check the stopped boolean. + timerW := &timerWrapper{} + cb.initTimer = timerW + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { + cb.parent.mu.Lock() + defer cb.parent.mu.Unlock() + if timerW.stopped { + return + } + cb.initTimer = nil + // Re-sync the priority. This will switch to the next priority if + // there's any. Note that it's important sync() is called after setting + // initTimer to nil. 
+ cb.parent.syncPriority(false) + }) +} + +func (cb *childBalancer) stopInitTimer() { + timerW := cb.initTimer + if timerW == nil { + return + } + cb.initTimer = nil + timerW.stopped = true + timerW.timer.Stop() } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go index 3a18f6e10d..2487c26260 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -36,9 +36,10 @@ var ( DefaultPriorityInitTimeout = 10 * time.Second ) -// syncPriority handles priority after a config update. It makes sure the -// balancer state (started or not) is in sync with the priorities (even in -// tricky cases where a child is moved from a priority to another). +// syncPriority handles priority after a config update or a child balancer +// connectivity state update. It makes sure the balancer state (started or not) +// is in sync with the priorities (even in tricky cases where a child is moved +// from a priority to another). // // It's guaranteed that after this function returns: // - If some child is READY, it is childInUse, and all lower priorities are @@ -53,10 +54,13 @@ var ( // set parent ClientConn to TransientFailure // - Otherwise, Scan all children from p0, and check balancer stats: // - For any of the following cases: -// - If balancer is not started (not built), this is either a new child -// with high priority, or a new builder for an existing child. -// - If balancer is READY -// - If this is the lowest priority +// - If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// - If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). 
+// - If balancer is READY +// - If this is the lowest priority // - do the following: // - if this is not the old childInUse, override picker so old picker is no // longer used. @@ -64,14 +68,11 @@ var ( // - forward the new addresses and config // // Caller must hold b.mu. -func (b *priorityBalancer) syncPriority() { +func (b *priorityBalancer) syncPriority(forceUpdate bool) { // Everything was removed by the update. if len(b.priorities) == 0 { b.childInUse = "" b.priorityInUse = 0 - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. - b.stopPriorityInitTimer() b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), @@ -89,6 +90,7 @@ func (b *priorityBalancer) syncPriority() { if !child.started || child.state.ConnectivityState == connectivity.Ready || child.state.ConnectivityState == connectivity.Idle || + (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != "" && b.childInUse != child.name { // childInUse was set and is different from this child, will @@ -97,8 +99,16 @@ func (b *priorityBalancer) syncPriority() { b.cc.UpdateState(child.state) } b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p) + oldChildInUse := b.childInUse b.switchToChild(child, p) - child.sendUpdate() + if b.childInUse != oldChildInUse || forceUpdate { + // If child is switched, send the update to the new child. + // + // Or if forceUpdate is true (when this is triggered by a + // ClientConn update), because the ClientConn update might + // contain changes for this child. 
+ child.sendUpdate() + } break } } @@ -123,8 +133,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { // - stop all child with lower priorities // - if childInUse is not this child // - set childInUse to this child -// - stops init timer -// - if this child is not started, start it, and start a init timer +// - if this child is not started, start it // // Note that it does NOT send the current child state (picker) to the parent // ClientConn. The caller needs to send it if necessary. @@ -156,33 +165,8 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { b.childInUse = child.name b.priorityInUse = priority - // Init timer is always for childInUse. Since we are switching to a - // different child, we will stop the init timer no matter what. If this - // child is not started, we will start the init timer later. - b.stopPriorityInitTimer() - if !child.started { child.start() - // Need this local variable to capture timerW in the AfterFunc closure - // to check the stopped boolean. - timerW := &timerWrapper{} - b.priorityInitTimer = timerW - timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { - b.mu.Lock() - defer b.mu.Unlock() - if timerW.stopped { - return - } - b.priorityInitTimer = nil - // Switch to the next priority if there's any. - if pNext := priority + 1; pNext < len(b.priorities) { - nameNext := b.priorities[pNext] - if childNext, ok := b.children[nameNext]; ok { - b.switchToChild(childNext, pNext) - childNext.sendUpdate() - } - } - }) } } @@ -222,141 +206,57 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S b.logger.Warningf("priority: child balancer not found for child %v, priority %v", childName, priority) return } - oldState := child.state.ConnectivityState + oldChildState := child.state child.state = s + // We start/stop the init timer of this child based on the new connectivity + // state. 
syncPriority() later will need the init timer (to check if it's + // nil or not) to decide which child to switch to. switch s.ConnectivityState { case connectivity.Ready, connectivity.Idle: - // Note that idle is also handled as if it's Ready. It will close the - // lower priorities (which will be kept in a cache, not deleted), and - // new picks will use the Idle picker. - b.handlePriorityWithNewStateReady(child, priority) + child.reportedTF = false + child.stopInitTimer() case connectivity.TransientFailure: - b.handlePriorityWithNewStateTransientFailure(child, priority) + child.reportedTF = true + child.stopInitTimer() case connectivity.Connecting: - b.handlePriorityWithNewStateConnecting(child, priority, oldState) + if !child.reportedTF { + child.startInitTimer() + } default: // New state is Shutdown, should never happen. Don't forward. } -} - -// handlePriorityWithNewStateReady handles state Ready from a higher or equal -// priority. -// -// An update with state Ready: -// - If it's from higher priority: -// - Switch to this priority -// - Forward the update -// - If it's from priorityInUse: -// - Forward only -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateReady(child *childBalancer, priority int) { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, priorityInUse - // will be closed, and the init timer will become useless. - b.stopPriorityInitTimer() - - // priorityInUse is lower than this priority, switch to this. - if b.priorityInUse > priority { - b.logger.Infof("Switching priority from %v to %v, because latter became Ready", b.priorityInUse, priority) - b.switchToChild(child, priority) - } - // Forward the update since it's READY. 
- b.cc.UpdateState(child.state) -} -// handlePriorityWithNewStateTransientFailure handles state TransientFailure -// from a higher or equal priority. -// -// An update with state TransientFailure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Switch to the lower -// - Forward the lower child's state -// - Do NOT forward this update -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateTransientFailure(child *childBalancer, priority int) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - // priorityInUse sends a failure. Stop its init timer. - b.stopPriorityInitTimer() - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) - return - } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() -} - -// handlePriorityWithNewStateConnecting handles state Connecting from a higher -// than or equal priority. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. 
-// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorithm (e.g. round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBalancer, priority int, oldState connectivity.State) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - - switch oldState { - case connectivity.Ready: - // Handling transition from Ready to Connecting, is same as handling - // TransientFailure. There's no need to stop the init timer, because it - // should have been stopped when state turned Ready. - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) + oldPriorityInUse := b.priorityInUse + child.parent.syncPriority(false) + // If child is switched by syncPriority(), it also sends the update from the + // new child to overwrite the old picker used by the parent. + // + // But no update is sent if the child is not switches. That means if this + // update is from childInUse, and this child is still childInUse after + // syncing, the update being handled here is not sent to the parent. In that + // case, we need to do an explicit check here to forward the update. 
+ if b.priorityInUse == oldPriorityInUse && b.priorityInUse == priority { + // Special handling for Connecting. If child was not switched, and this + // is a Connecting->Connecting transition, do not send the redundant + // update, since all Connecting pickers are the same (they tell the RPCs + // to repick). + // + // This can happen because the initial state of a child (before any + // update is received) is Connecting. When the child is started, it's + // picker is sent to the parent by syncPriority (to overwrite the old + // picker if there's any). When it reports Connecting after being + // started, it will send a Connecting update (handled here), causing a + // Connecting->Connecting transition. + if oldChildState.ConnectivityState == connectivity.Connecting && s.ConnectivityState == connectivity.Connecting { return } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() - case connectivity.Idle: + // Only forward this update if sync() didn't switch child, and this + // child is in use. + // + // sync() forwards the update if the child was switched, so there's no + // need to forward again. b.cc.UpdateState(child.state) - default: - // Old state is Connecting, TransientFailure or Shutdown. Don't forward. 
} + } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go index dcea6d46e5..ec3b560569 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -143,6 +143,8 @@ func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, erro return balancer.PickResult{}, fmt.Errorf("no connection is Ready") } +// nextSkippingDuplicates finds the next entry in the ring, with a different +// subconn from the given entry. func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { for next := ring.next(entry); next != entry; next = ring.next(next) { if next.sc != entry.sc { @@ -152,3 +154,28 @@ func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { // There's no qualifying next entry. return nil } + +// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's +// different from the given subconn. +func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn { + var entry *ringEntry + for _, it := range ring.items { + if it.sc == sc { + entry = it + break + } + } + if entry == nil { + // If the given subconn is not in the ring (e.g. it was deleted), return + // the first one. 
+ if len(ring.items) > 0 { + return ring.items[0].sc + } + return nil + } + ee := nextSkippingDuplicates(ring, entry) + if ee == nil { + return nil + } + return ee.sc +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go index f8a47f165b..4e9c1772b1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -98,6 +98,10 @@ type subConn struct { // When connectivity state is updated to Idle for this SubConn, if // connectQueued is true, Connect() will be called on the SubConn. connectQueued bool + // attemptingToConnect indicates if this subconn is attempting to connect. + // It's set when queueConnect is called. It's unset when the state is + // changed to Ready/Shutdown, or Idle (and if connectQueued is false). + attemptingToConnect bool } // setState updates the state of this SubConn. @@ -113,6 +117,8 @@ func (sc *subConn) setState(s connectivity.State) { if sc.connectQueued { sc.connectQueued = false sc.sc.Connect() + } else { + sc.attemptingToConnect = false } case connectivity.Connecting: // Clear connectQueued if the SubConn isn't failing. This state @@ -122,11 +128,14 @@ func (sc *subConn) setState(s connectivity.State) { // Clear connectQueued if the SubConn isn't failing. This state // transition is unlikely to happen, but handle this just in case. sc.connectQueued = false + sc.attemptingToConnect = false // Set to a non-failing state. sc.failing = false case connectivity.TransientFailure: // Set to a failing state. 
sc.failing = true + case connectivity.Shutdown: + sc.attemptingToConnect = false } sc.state = s } @@ -149,6 +158,7 @@ func (sc *subConn) effectiveState() connectivity.State { func (sc *subConn) queueConnect() { sc.mu.Lock() defer sc.mu.Unlock() + sc.attemptingToConnect = true if sc.state == connectivity.Idle { sc.sc.Connect() return @@ -158,6 +168,12 @@ func (sc *subConn) queueConnect() { sc.connectQueued = true } +func (sc *subConn) isAttemptingToConnect() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.attemptingToConnect +} + type ringhashBalancer struct { cc balancer.ClientConn logger *grpclog.PrefixLogger @@ -268,7 +284,8 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) err var err error b.ring, err = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) if err != nil { - panic(err) + b.ResolverError(fmt.Errorf("ringhash failed to make a new ring: %v", err)) + return balancer.ErrBadResolverState } b.regeneratePicker() b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) @@ -334,12 +351,6 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance switch s { case connectivity.Idle: - // When the overall state is TransientFailure, this will never get picks - // if there's a lower priority. Need to keep the SubConns connecting so - // there's a chance it will recover. - if b.state == connectivity.TransientFailure { - scs.queueConnect() - } // No need to send an update. No queued RPC can be unblocked. If the // overall state changed because of this, sendUpdate is already true. 
case connectivity.Connecting: @@ -364,6 +375,35 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance if sendUpdate { b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } + + switch b.state { + case connectivity.Connecting, connectivity.TransientFailure: + // When overall state is TransientFailure, we need to make sure at least + // one SubConn is attempting to connect, otherwise this balancer may + // never get picks if the parent is priority. + // + // Because we report Connecting as the overall state when only one + // SubConn is in TransientFailure, we do the same check for Connecting + // here. + // + // Note that this check also covers deleting SubConns due to address + // change. E.g. if the SubConn attempting to connect is deleted, and the + // overall state is TF. Since there must be at least one SubConn + // attempting to connect, we need to trigger one. But since the deleted + // SubConn will eventually send a shutdown update, this code will run + // and trigger the next SubConn to connect. + for _, sc := range b.subConns { + if sc.isAttemptingToConnect() { + return + } + } + // Trigger a SubConn (this updated SubConn's next SubConn in the ring) + // to connect if nobody is attempting to connect. + sc := nextSkippingDuplicatesSubConn(b.ring, scs) + if sc != nil { + sc.queueConnect() + } + } } // mergeErrors builds an error from the last connection error and the last @@ -395,6 +435,7 @@ func (b *ringhashBalancer) Close() {} // // It's not thread safe. type connectivityStateEvaluator struct { + sum uint64 nums [5]uint64 } @@ -404,6 +445,7 @@ type connectivityStateEvaluator struct { // - If there is at least one subchannel in READY state, report READY. // - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. // - If there is at least one subchannel in CONNECTING state, report CONNECTING. 
+// - If there is one subchannel in TRANSIENT_FAILURE and there is more than one subchannel, report state CONNECTING. // - If there is at least one subchannel in Idle state, report Idle. // - Otherwise, report TRANSIENT_FAILURE. // @@ -417,6 +459,14 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. cse.nums[state] += updateVal } + if oldState == connectivity.Shutdown { + // There's technically no transition from Shutdown. But we record a + // Shutdown->Idle transition when a new SubConn is created. + cse.sum++ + } + if newState == connectivity.Shutdown { + cse.sum-- + } if cse.nums[connectivity.Ready] > 0 { return connectivity.Ready @@ -427,6 +477,9 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne if cse.nums[connectivity.Connecting] > 0 { return connectivity.Connecting } + if cse.nums[connectivity.TransientFailure] > 0 && cse.sum > 1 { + return connectivity.Connecting + } if cse.nums[connectivity.Idle] > 0 { return connectivity.Idle } diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go index 3db9be1cac..000927c541 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -58,6 +58,10 @@ type ldsConfig struct { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. +// +// TODO(easwars): Make this function a method on the xdsResolver type. +// Currently, there is a single call site for this function, and all arguments +// passed to it are fields of the xdsResolver type. 
func watchService(c xdsclient.XDSClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index c4b147d21e..8a613c4c44 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -63,9 +63,8 @@ type xdsResolverBuilder struct { // // The xds bootstrap process is performed (and a new xds client is built) every // time an xds resolver is built. -func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { +func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { r := &xdsResolver{ - target: t, cc: cc, closed: grpcsync.NewEvent(), updateCh: make(chan suWithError, 1), @@ -77,7 +76,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op } }() r.logger = prefixLogger(r) - r.logger.Infof("Creating resolver for target: %+v", t) + r.logger.Infof("Creating resolver for target: %+v", target) newXDSClient := newXDSClient if b.newXDSClient != nil { @@ -115,7 +114,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op // - If authority is not set in the target, use the top level template // - If authority is set, use the template from the authority map. 
template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate - if authority := r.target.URL.Host; authority != "" { + if authority := target.URL.Host; authority != "" { a := bootstrapConfig.Authorities[authority] if a == nil { return nil, fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority) @@ -127,19 +126,19 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op template = a.ClientListenerResourceNameTemplate } } - endpoint := r.target.URL.Path + endpoint := target.URL.Path if endpoint == "" { - endpoint = r.target.URL.Opaque + endpoint = target.URL.Opaque } endpoint = strings.TrimPrefix(endpoint, "/") - resourceName := bootstrap.PopulateResourceTemplate(template, endpoint) + r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, endpoint) - // Register a watch on the xdsClient for the user's dial target. - cancelWatch := watchService(r.client, resourceName, r.handleServiceUpdate, r.logger) - r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client) + // Register a watch on the xdsClient for the resource name determined above. + cancelWatch := watchService(r.client, r.ldsResourceName, r.handleServiceUpdate, r.logger) + r.logger.Infof("Watch started on resource name %v with xds-client %p", r.ldsResourceName, r.client) r.cancelWatch = func() { cancelWatch() - r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.target.Endpoint, r.client) + r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.ldsResourceName, r.client) } go r.run() @@ -165,11 +164,10 @@ type suWithError struct { // (which performs LDS/RDS queries for the same), and passes the received // updates to the ClientConn. 
type xdsResolver struct { - target resolver.Target - cc resolver.ClientConn - closed *grpcsync.Event - - logger *grpclog.PrefixLogger + cc resolver.ClientConn + closed *grpcsync.Event + logger *grpclog.PrefixLogger + ldsResourceName string // The underlying xdsClient which performs all xDS requests and responses. client xdsclient.XDSClient @@ -212,7 +210,7 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, pretty.FormatJSON(sc)) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.ldsResourceName, r.client, pretty.FormatJSON(sc)) // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ @@ -231,7 +229,7 @@ func (r *xdsResolver) run() { return case update := <-r.updateCh: if update.err != nil { - r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err) + r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.ldsResourceName, r.client, update.err) if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { // If error is resource-not-found, it means the LDS // resource was removed. Ultimately send an empty service @@ -259,7 +257,7 @@ func (r *xdsResolver) run() { // Create the config selector for this update. 
cs, err := r.newConfigSelector(update.su) if err != nil { - r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.target.Endpoint, r.client, err) + r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.ldsResourceName, r.client, err) r.cc.ReportError(err) continue } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index cfe6fc865a..9bc4588c14 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -55,7 +55,7 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun config = cfg.XDSServer } - a, err := c.newAuthority(config) + a, err := c.newAuthorityLocked(config) if err != nil { return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) } @@ -73,14 +73,14 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun return a, func() { c.unrefAuthority(a) }, nil } -// newAuthority creates a new authority for the config. But before that, it +// newAuthorityLocked creates a new authority for the config. But before that, it // checks the cache to see if an authority for this config already exists. // // The caller must take a reference of the returned authority before using, and // unref afterwards. // // caller must hold c.authorityMu -func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, retErr error) { +func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { // First check if there's already an authority for this config. 
If found, it // means this authority is used by other watches (could be the same // authority name, or a different authority name but the same server diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go index 32c7e9c9d7..cba5afd454 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go @@ -28,7 +28,9 @@ import ( // It returns a Store for the user to report loads, a function to cancel the // load reporting stream. func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) { - a, err := c.newAuthority(server) + c.authorityMu.Lock() + a, err := c.newAuthorityLocked(server) + c.authorityMu.Unlock() if err != nil { c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) return nil, func() {} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go index b61a80b429..d459717acd 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go @@ -76,7 +76,7 @@ type OutlierDetection struct { // ejected due to outlier detection. Defaults to 10% but will eject at least // one host regardless of the value. MaxEjectionPercent uint32 - // SuccessRateStddevFactor is used to determine the ejection threshold for + // SuccessRateStdevFactor is used to determine the ejection threshold for // success rate outlier ejection. 
The ejection threshold is the difference // between the mean success rate, and the product of this factor and the // standard deviation of the mean success rate: mean - (stdev * diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 147870cdf6..9eb7117d9a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -107,7 +107,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) } - priorities := make(map[uint32]struct{}) + priorities := make(map[uint32]map[string]bool) for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { @@ -119,7 +119,16 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, SubZone: l.SubZone, } priority := locality.GetPriority() - priorities[priority] = struct{}{} + localitiesWithPriority := priorities[priority] + if localitiesWithPriority == nil { + localitiesWithPriority = make(map[string]bool) + priorities[priority] = localitiesWithPriority + } + lidStr, _ := lid.ToString() + if localitiesWithPriority[lidStr] { + return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) + } + localitiesWithPriority[lidStr] = true ret.Localities = append(ret.Localities, Locality{ ID: lid, Endpoints: parseEndpoints(locality.GetLbEndpoints()), diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index df36e3a30f..0173b6982e 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - 
if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = 
inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if 
merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc05..268558a0d6 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser 
*yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/modules.txt b/vendor/modules.txt index a31fe3158f..7a57cb5fd5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -29,7 +29,7 @@ contrib.go.opencensus.io/exporter/prometheus ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/GoogleCloudPlatform/declarative-resource-client-library v1.15.2 +# github.com/GoogleCloudPlatform/declarative-resource-client-library v1.16.0 ## explicit; go 1.16 github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations @@ -288,7 +288,7 @@ github.com/exponent-io/jsonpath # github.com/fatih/camelcase v1.0.0 ## explicit github.com/fatih/camelcase -# github.com/fatih/color v1.12.0 +# github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color # github.com/fsnotify/fsnotify v1.5.1 @@ -481,23 +481,23 @@ github.com/hashicorp/go-cty/cty/gocty github.com/hashicorp/go-cty/cty/json github.com/hashicorp/go-cty/cty/msgpack github.com/hashicorp/go-cty/cty/set -# github.com/hashicorp/go-hclog v1.2.0 +# github.com/hashicorp/go-hclog v1.2.1 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.4.3 -## explicit; go 1.13 +# github.com/hashicorp/go-plugin v1.4.4 +## explicit; go 1.17 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid -# github.com/hashicorp/go-version v1.4.0 +# github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version -# github.com/hashicorp/hc-install v0.3.2 +# github.com/hashicorp/hc-install 
v0.4.0 ## explicit; go 1.16 github.com/hashicorp/hc-install github.com/hashicorp/hc-install/checkpoint @@ -521,8 +521,8 @@ github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/scanner github.com/hashicorp/hcl/hcl/strconv github.com/hashicorp/hcl/hcl/token -# github.com/hashicorp/hcl/v2 v2.12.0 -## explicit; go 1.12 +# github.com/hashicorp/hcl/v2 v2.13.0 +## explicit; go 1.18 github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode github.com/hashicorp/hcl/v2/hclsyntax @@ -530,34 +530,38 @@ github.com/hashicorp/hcl/v2/hclwrite # github.com/hashicorp/logutils v1.0.0 ## explicit github.com/hashicorp/logutils -# github.com/hashicorp/terraform-exec v0.16.1 +# github.com/hashicorp/terraform-exec v0.17.2 ## explicit; go 1.17 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec -# github.com/hashicorp/terraform-json v0.13.0 +# github.com/hashicorp/terraform-json v0.14.0 ## explicit; go 1.13 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-go v0.9.0 +# github.com/hashicorp/terraform-plugin-go v0.10.0 ## explicit; go 1.17 github.com/hashicorp/terraform-plugin-go/internal/logging github.com/hashicorp/terraform-plugin-go/tfprotov5 +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server github.com/hashicorp/terraform-plugin-go/tfprotov6 +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6 
github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server github.com/hashicorp/terraform-plugin-go/tftypes -# github.com/hashicorp/terraform-plugin-log v0.4.0 +# github.com/hashicorp/terraform-plugin-log v0.4.1 ## explicit; go 1.17 github.com/hashicorp/terraform-plugin-log/internal/hclogutils github.com/hashicorp/terraform-plugin-log/internal/logging github.com/hashicorp/terraform-plugin-log/tflog github.com/hashicorp/terraform-plugin-log/tfsdklog -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.16.0 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.18.0 ## explicit; go 1.17 github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff @@ -579,10 +583,10 @@ github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform # github.com/hashicorp/terraform-provider-google-beta v3.73.0+incompatible => ./third_party/github.com/hashicorp/terraform-provider-google-beta -## explicit; go 1.16 +## explicit; go 1.18 github.com/hashicorp/terraform-provider-google-beta/google-beta github.com/hashicorp/terraform-provider-google-beta/version -# github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 +# github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c ## explicit; go 1.14 github.com/hashicorp/terraform-registry-address # github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 @@ -627,10 +631,10 @@ github.com/liggitt/tabwriter github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mattn/go-colorable v0.1.8 +# github.com/mattn/go-colorable v0.1.12 ## explicit; go 1.13 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.12 +# github.com/mattn/go-isatty v0.0.14 ## explicit; go 1.12 github.com/mattn/go-isatty # 
github.com/mattn/go-runewidth v0.0.9 @@ -758,7 +762,7 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.7.0 +# github.com/stretchr/testify v1.7.2 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require @@ -832,7 +836,7 @@ go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore -# golang.org/x/crypto v0.0.0-20220214200702-86341886e292 +# golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 ## explicit; go 1.17 golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -1032,7 +1036,7 @@ google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.46.2 +# google.golang.org/grpc v1.47.0 ## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1123,6 +1127,7 @@ google.golang.org/grpc/xds/internal/balancer/clusterimpl google.golang.org/grpc/xds/internal/balancer/clustermanager google.golang.org/grpc/xds/internal/balancer/clusterresolver google.golang.org/grpc/xds/internal/balancer/loadstore +google.golang.org/grpc/xds/internal/balancer/outlierdetection google.golang.org/grpc/xds/internal/balancer/priority google.golang.org/grpc/xds/internal/balancer/ringhash google.golang.org/grpc/xds/internal/clusterspecifier @@ -1190,7 +1195,7 @@ gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +# gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 # k8s.io/api v0.24.2